summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/scan.c67
-rw-r--r--drivers/atm/fore200e.c12
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/atm/idt77252.c4
-rw-r--r--drivers/block/xen-blkback/blkback.c26
-rw-r--r--drivers/block/xen-blkback/common.h26
-rw-r--r--drivers/block/xen-blkback/xenbus.c15
-rw-r--r--drivers/char/random.c12
-rw-r--r--drivers/clk/meson/Kconfig1
-rw-r--r--drivers/clk/sunxi-ng/Kconfig1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c2
-rw-r--r--drivers/clocksource/arm_arch_timer.c4
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c1
-rw-r--r--drivers/clocksource/timer-sun5i.c1
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c15
-rw-r--r--drivers/firmware/dmi-id.c4
-rw-r--r--drivers/firmware/dmi_scan.c49
-rw-r--r--drivers/gpio/gpio-mvebu.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_crtc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c7
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Kconfig1
-rw-r--r--drivers/gpu/drm/drm_connector.c38
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c63
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c2
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c4
-rw-r--r--drivers/gpu/drm/i915/i915_pvinfo.h8
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c10
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c44
-rw-r--r--drivers/gpu/drm/i915/intel_dp_aux_backlight.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c6
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c36
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c41
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h19
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c9
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c42
-rw-r--r--drivers/gpu/drm/radeon/cik.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c7
-rw-r--r--drivers/gpu/drm/tegra/drm.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c1
-rw-r--r--drivers/gpu/host1x/dev.c2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-magicmouse.c15
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/i2c/busses/i2c-imx.c8
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/iio/adc/meson_saradc.c4
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c7
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c1
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c1
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c39
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h3
-rw-r--r--drivers/infiniband/core/addr.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c471
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h22
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c384
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h18
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c314
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h61
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c333
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c125
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c4
-rw-r--r--drivers/infiniband/hw/qedr/main.c10
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h9
-rw-r--r--drivers/infiniband/hw/qedr/qedr_cm.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c74
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c11
-rw-r--r--drivers/input/misc/soc_button_array.c20
-rw-r--r--drivers/input/rmi4/rmi_f54.c17
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/irqchip/irq-mips-gic.c6
-rw-r--r--drivers/isdn/gigaset/asyncdata.c4
-rw-r--r--drivers/isdn/gigaset/isocdata.c2
-rw-r--r--drivers/isdn/hysdn/hycapi.c8
-rw-r--r--drivers/isdn/i4l/isdn_bsdcomp.c2
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c2
-rw-r--r--drivers/isdn/mISDN/socket.c2
-rw-r--r--drivers/leds/leds-bcm6328.c4
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c31
-rw-r--r--drivers/md/dm-integrity.c12
-rw-r--r--drivers/md/dm-io.c4
-rw-r--r--drivers/md/dm-raid.c17
-rw-r--r--drivers/md/dm-raid1.c21
-rw-r--r--drivers/md/dm-thin.c26
-rw-r--r--drivers/mfd/arizona-core.c3
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c9
-rw-r--r--drivers/net/arcnet/arcdevice.h4
-rw-r--r--drivers/net/arcnet/arcnet.c81
-rw-r--r--drivers/net/arcnet/capmode.c2
-rw-r--r--drivers/net/arcnet/com20020-pci.c64
-rw-r--r--drivers/net/arcnet/com20020.c2
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_netlink.c16
-rw-r--r--drivers/net/bonding/bond_options.c132
-rw-r--r--drivers/net/caif/caif_hsi.c12
-rw-r--r--drivers/net/caif/caif_serial.c3
-rw-r--r--drivers/net/caif/caif_spi.c3
-rw-r--r--drivers/net/can/dev.c11
-rw-r--r--drivers/net/can/vxcan.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c16
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c47
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h19
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c401
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h311
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c11
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c2
-rw-r--r--drivers/net/dummy.c3
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h31
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c85
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c5
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c11
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c306
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h30
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h34
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h53
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c94
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c244
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c245
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c33
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c240
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c10
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h56
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c61
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c24
-rw-r--r--drivers/net/ethernet/cadence/Kconfig10
-rw-r--r--drivers/net/ethernet/cadence/Makefile5
-rw-r--r--drivers/net/ethernet/cadence/macb.h159
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c (renamed from drivers/net/ethernet/cadence/macb.c)301
-rwxr-xr-xdrivers/net/ethernet/cadence/macb_ptp.c518
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c37
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h18
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h29
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c33
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c177
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c16
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c98
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h69
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c524
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c244
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h12
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c18
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c58
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c87
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c461
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h140
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c378
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c133
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c174
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c1042
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c376
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c164
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h204
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c268
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c145
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/ipoib.c)78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/ipoib.h)7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/Kconfig8
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3
-rw-r--r--drivers/net/ethernet/netronome/Kconfig10
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c211
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c157
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h317
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c391
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h159
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c292
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c438
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c400
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c28
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h88
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app_nic.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c49
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c210
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c238
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c396
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h128
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c117
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h89
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c56
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c27
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c7
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c37
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c30
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c1722
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h201
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c1694
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h156
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c62
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h18
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c21
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_rdma.c (renamed from drivers/net/ethernet/qlogic/qede/qede_roce.c)144
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c23
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c16
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c15
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c203
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpts.h16
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c18
-rw-r--r--drivers/net/geneve.c6
-rw-r--r--drivers/net/gtp.c6
-rw-r--r--drivers/net/hyperv/hyperv_net.h7
-rw-r--r--drivers/net/hyperv/netvsc.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c12
-rw-r--r--drivers/net/hyperv/rndis_filter.c4
-rw-r--r--drivers/net/ifb.c3
-rw-r--r--drivers/net/ipvlan/ipvlan.h3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c9
-rw-r--r--drivers/net/ipvlan/ipvtap.c9
-rw-r--r--drivers/net/macsec.c9
-rw-r--r--drivers/net/macvlan.c97
-rw-r--r--drivers/net/macvtap.c7
-rw-r--r--drivers/net/nlmon.c3
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/lxt.c11
-rw-r--r--drivers/net/phy/micrel.c2
-rw-r--r--drivers/net/phy/smsc.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c6
-rw-r--r--drivers/net/ppp/ppp_synctty.c2
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/team/team.c6
-rw-r--r--drivers/net/tun.c3
-rw-r--r--drivers/net/usb/asix_common.c5
-rw-r--r--drivers/net/usb/ax88179_178a.c16
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/usb/hso.c13
-rw-r--r--drivers/net/usb/int51x1.c2
-rw-r--r--drivers/net/usb/r8152.c22
-rw-r--r--drivers/net/veth.c12
-rw-r--r--drivers/net/virtio_net.c1
-rw-r--r--drivers/net/vrf.c6
-rw-r--r--drivers/net/vxlan.c421
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c168
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h132
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c53
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c137
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h87
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c79
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c51
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c613
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c49
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/ioctl.c180
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c98
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c228
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c71
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h40
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c147
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c73
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h43
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c444
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h47
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c38
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-9000.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-a000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-api.h205
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h186
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h156
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h576
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c65
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c105
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c252
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c116
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c303
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c184
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c48
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c12
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c282
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c52
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h41
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c889
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h45
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c85
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.h7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c82
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c81
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c88
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h34
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c9
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c130
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c21
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_boot_params.h15
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h28
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h76
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_sdio.c2
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c8
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c6
-rw-r--r--drivers/nfc/pn533/pn533.c4
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c2
-rw-r--r--drivers/ntb/ntb_transport.c58
-rw-r--r--drivers/ntb/test/ntb_perf.c4
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/pci/access.c12
-rw-r--r--drivers/pci/endpoint/functions/Kconfig1
-rw-r--r--drivers/pinctrl/pinctrl-amd.c91
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c2
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c16
-rw-r--r--drivers/s390/net/ctcm_main.c26
-rw-r--r--drivers/s390/net/netiucv.c10
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c82
-rw-r--r--drivers/s390/net/qeth_core_mpc.c14
-rw-r--r--drivers/s390/net/qeth_core_mpc.h18
-rw-r--r--drivers/s390/net/qeth_l2_main.c50
-rw-r--r--drivers/scsi/qedi/qedi_fw.c1
-rw-r--r--drivers/scsi/qedi/qedi_main.c4
-rw-r--r--drivers/staging/iio/cdc/ad7152.c6
-rw-r--r--drivers/staging/octeon/ethernet-tx.c3
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c11
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c11
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c3
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c4
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c12
-rw-r--r--drivers/target/iscsi/iscsi_target.c22
-rw-r--r--drivers/target/target_core_internal.h2
-rw-r--r--drivers/target/target_core_tmr.c16
-rw-r--r--drivers/target/target_core_transport.c9
-rw-r--r--drivers/usb/gadget/composite.c11
-rw-r--r--drivers/usb/gadget/function/f_ncm.c11
-rw-r--r--drivers/usb/gadget/legacy/inode.c9
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c13
-rw-r--r--drivers/usb/gadget/udc/net2280.c9
-rw-r--r--drivers/usb/host/xhci-mem.c7
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/smscufx.c5
-rw-r--r--drivers/video/fbdev/udlfb.c9
-rw-r--r--drivers/video/fbdev/via/viafbdev.c8
-rw-r--r--drivers/virtio/virtio_balloon.c7
617 files changed, 24445 insertions, 8428 deletions
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3a10d7573477..d53162997f32 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
adev->flags.coherent_dma = cca;
}
+static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+{
+ bool *is_spi_i2c_slave_p = data;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return 1;
+
+ /*
+ * devices that are connected to UART still need to be enumerated to
+ * platform bus
+ */
+ if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+ *is_spi_i2c_slave_p = true;
+
+ /* no need to do more checking */
+ return -1;
+}
+
+static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+{
+ struct list_head resource_list;
+ bool is_spi_i2c_slave = false;
+
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
+ &is_spi_i2c_slave);
+ acpi_dev_free_resource_list(&resource_list);
+
+ return is_spi_i2c_slave;
+}
+
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, unsigned long long sta)
{
@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
acpi_bus_get_flags(device);
device->flags.match_driver = false;
device->flags.initialized = true;
+ device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
acpi_device_clear_enumerated(device);
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
return AE_OK;
}
-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
-{
- bool *is_spi_i2c_slave_p = data;
-
- if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
- return 1;
-
- /*
- * devices that are connected to UART still need to be enumerated to
- * platform bus
- */
- if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
- *is_spi_i2c_slave_p = true;
-
- /* no need to do more checking */
- return -1;
-}
-
static void acpi_default_enumeration(struct acpi_device *device)
{
- struct list_head resource_list;
- bool is_spi_i2c_slave = false;
-
/*
* Do not enumerate SPI/I2C slaves as they will be enumerated by their
* respective parents.
*/
- INIT_LIST_HEAD(&resource_list);
- acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
- &is_spi_i2c_slave);
- acpi_dev_free_resource_list(&resource_list);
- if (!is_spi_i2c_slave) {
+ if (!device->flags.spi_i2c_slave) {
acpi_create_platform_device(device, NULL);
acpi_device_set_enumerated(device);
} else {
@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
return;
device->flags.match_driver = true;
- if (ret > 0) {
+ if (ret > 0 && !device->flags.spi_i2c_slave) {
acpi_device_set_enumerated(device);
goto ok;
}
@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
if (ret < 0)
return;
- if (device->pnp.type.platform_id)
- acpi_default_enumeration(device);
- else
+ if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
acpi_device_set_enumerated(device);
+ else
+ acpi_default_enumeration(device);
ok:
list_for_each_entry(child, &device->children, node)
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 7584ae1ded85..f0433adcd8fc 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -924,12 +924,7 @@ fore200e_tx_irq(struct fore200e* fore200e)
else {
dev_kfree_skb_any(entry->skb);
}
-#if 1
- /* race fixed by the above incarnation mechanism, but... */
- if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
- atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
- }
-#endif
+
/* check error condition */
if (*entry->status & STATUS_ERROR)
atomic_inc(&vcc->stats->tx_err);
@@ -1130,13 +1125,9 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
return -ENOMEM;
}
- ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
vcc->push(vcc, skb);
atomic_inc(&vcc->stats->rx);
- ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
return 0;
}
@@ -1572,7 +1563,6 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
unsigned long flags;
ASSERT(vcc);
- ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
ASSERT(fore200e);
ASSERT(fore200e_vcc);
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 461da2bce8ef..37ee21c5a5ca 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2395,7 +2395,7 @@ he_close(struct atm_vcc *vcc)
* TBRQ, the host issues the close command to the adapter.
*/
- while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
+ while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
(retry < MAX_RETRY)) {
msleep(sleep);
if (sleep < 250)
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 4e64de380bda..60bacba03d17 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -724,7 +724,7 @@ push_on_scq(struct idt77252_dev *card, struct vc_map *vc, struct sk_buff *skb)
struct sock *sk = sk_atm(vcc);
vc->estimator->cells += (skb->len + 47) / 48;
- if (atomic_read(&sk->sk_wmem_alloc) >
+ if (refcount_read(&sk->sk_wmem_alloc) >
(sk->sk_sndbuf >> 1)) {
u32 cps = vc->estimator->maxcps;
@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
atomic_inc(&vcc->stats->tx_err);
return -ENOMEM;
}
- atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+ refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
skb_put_data(skb, cell, 52);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 726c32e35db9..0e824091a12f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
unsigned long timeout;
int ret;
- xen_blkif_get(blkif);
-
set_freezable();
while (!kthread_should_stop()) {
if (try_to_freeze())
@@ -665,7 +663,6 @@ purge_gnt_list:
print_stats(ring);
ring->xenblkd = NULL;
- xen_blkif_put(blkif);
return 0;
}
@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
static void make_response(struct xen_blkif_ring *ring, u64 id,
unsigned short op, int st)
{
- struct blkif_response resp;
+ struct blkif_response *resp;
unsigned long flags;
union blkif_back_rings *blk_rings;
int notify;
- resp.id = id;
- resp.operation = op;
- resp.status = st;
-
spin_lock_irqsave(&ring->blk_ring_lock, flags);
blk_rings = &ring->blk_rings;
/* Place on the response ring for the relevant domain. */
switch (ring->blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
- memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->native,
+ blk_rings->native.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_32:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+ blk_rings->x86_32.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_64:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+ blk_rings->x86_64.rsp_prod_pvt);
break;
default:
BUG();
}
+
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6ab8cb..ecb35fe8ca8d 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
struct blkif_common_request {
char dummy;
};
-struct blkif_common_response {
- char dummy;
-};
+
+/* i386 protocol version */
struct blkif_x86_32_request_rw {
uint8_t nr_segments; /* number of segments */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
} u;
} __attribute__((__packed__));
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
- uint64_t id; /* copied from request */
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
-#pragma pack(pop)
/* x86_64 protocol version */
struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
} u;
} __attribute__((__packed__));
-struct blkif_x86_64_response {
- uint64_t __attribute__((__aligned__(8))) id;
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
-
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
- struct blkif_common_response);
+ struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
- struct blkif_x86_32_response);
+ struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
- struct blkif_x86_64_response);
+ struct blkif_response);
union blkif_back_rings {
struct blkif_back_ring native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
wait_queue_head_t wq;
atomic_t inflight;
+ bool active;
/* One thread per blkif ring. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 1f3dfaa54d87..792da683e70d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
init_waitqueue_head(&ring->shutdown_wq);
ring->blkif = blkif;
ring->st_print = jiffies;
- xen_blkif_get(blkif);
+ ring->active = true;
}
return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
struct xen_blkif_ring *ring = &blkif->rings[r];
unsigned int i = 0;
+ if (!ring->active)
+ continue;
+
if (ring->xenblkd) {
kthread_stop(ring->xenblkd);
wake_up(&ring->shutdown_wq);
- ring->xenblkd = NULL;
}
/* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
BUG_ON(ring->free_pages_num != 0);
BUG_ON(ring->persistent_gnt_c != 0);
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
- xen_blkif_put(blkif);
+ ring->active = false;
}
blkif->nr_ring_pages = 0;
/*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
static void xen_blkif_free(struct xen_blkif *blkif)
{
-
- xen_blkif_disconnect(blkif);
+ WARN_ON(xen_blkif_disconnect(blkif));
xen_vbd_free(&blkif->vbd);
+ kfree(blkif->be->mode);
+ kfree(blkif->be);
/* Make sure everything is drained before shutting down */
kmem_cache_free(xen_blkif_cachep, blkif);
@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
xen_blkif_put(be->blkif);
}
- kfree(be->mode);
- kfree(be);
return 0;
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e870f329db88..01a260f67437 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
cp++; crng_init_cnt++; len--;
}
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
invalidate_batched_entropy();
crng_init = 1;
wake_up_interruptible(&crng_init_wait);
pr_notice("random: fast init done\n");
}
- spin_unlock_irqrestore(&primary_crng.lock, flags);
return 1;
}
@@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
memzero_explicit(&buf, sizeof(buf));
crng->init_time = jiffies;
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng == &primary_crng && crng_init < 2) {
invalidate_batched_entropy();
crng_init = 2;
@@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
wake_up_interruptible(&crng_init_wait);
pr_notice("random: crng init done\n");
}
- spin_unlock_irqrestore(&primary_crng.lock, flags);
}
static inline void crng_wait_ready(void)
@@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
u64 ret;
- bool use_lock = crng_init < 2;
- unsigned long flags;
+ bool use_lock = READ_ONCE(crng_init) < 2;
+ unsigned long flags = 0;
struct batched_entropy *batch;
#if BITS_PER_LONG == 64
@@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
u32 get_random_u32(void)
{
u32 ret;
- bool use_lock = crng_init < 2;
- unsigned long flags;
+ bool use_lock = READ_ONCE(crng_init) < 2;
+ unsigned long flags = 0;
struct batched_entropy *batch;
if (arch_get_random_int(&ret))
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 19480bcc7046..2f29ee1a4d00 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
bool
depends on COMMON_CLK_AMLOGIC
+ select RESET_CONTROLLER
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
Say Y if you want peripherals and CPU frequency scaling to work.
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index b0d551a8efe4..eb89c7801f00 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -156,6 +156,7 @@ config SUN8I_R_CCU
bool "Support for Allwinner SoCs' PRCM CCUs"
select SUNXI_CCU_DIV
select SUNXI_CCU_GATE
+ select SUNXI_CCU_MP
default MACH_SUN8I || (ARCH_SUNXI && ARM64)
endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index 9b3cd24b78d2..061b6fbb4f95 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -31,7 +31,9 @@
#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_VE 9
#define CLK_PLL_DDR0 10
-#define CLK_PLL_PERIPH0 11
+
+/* PLL_PERIPH0 exported for PRCM */
+
#define CLK_PLL_PERIPH0_2X 12
#define CLK_PLL_PERIPH1 13
#define CLK_PLL_PERIPH1_2X 14
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index 5c476f966a72..5372bf8be5e6 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk, "ahb-ss", "ahb",
static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb",
0x060, BIT(6), 0);
static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb",
- 0x060, BIT(6), 0);
+ 0x060, BIT(7), 0);
static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb",
0x060, BIT(8), 0);
static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb",
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 89e68d29bf45..df97e25aec76 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
0x12c, 0, 4, 24, 3, BIT(31),
CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
- 0x12c, 0, 4, 24, 3, BIT(31),
+ 0x130, 0, 4, 24, 3, BIT(31),
CLK_SET_RATE_PARENT);
static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
index 85973d1e8165..1b4baea37d81 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
@@ -29,7 +29,9 @@
#define CLK_PLL_VIDEO 6
#define CLK_PLL_VE 7
#define CLK_PLL_DDR 8
-#define CLK_PLL_PERIPH0 9
+
+/* PLL_PERIPH0 exported for PRCM */
+
#define CLK_PLL_PERIPH0_2X 10
#define CLK_PLL_GPU 11
#define CLK_PLL_PERIPH1 12
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index e58706b40ae9..6297add857b5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
[RST_BUS_EMAC] = { 0x2c0, BIT(17) },
[RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
[RST_BUS_SPI0] = { 0x2c0, BIT(20) },
- [RST_BUS_OTG] = { 0x2c0, BIT(23) },
+ [RST_BUS_OTG] = { 0x2c0, BIT(24) },
[RST_BUS_EHCI0] = { 0x2c0, BIT(26) },
[RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4bed671e490e..8b5c30062d99 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
return 0;
}
- rate = readl_relaxed(frame + CNTFRQ);
+ rate = readl_relaxed(base + CNTFRQ);
- iounmap(frame);
+ iounmap(base);
return rate;
}
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 44e5e951583b..8e64b8460f11 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 2e9c830ae1cd..c4656c4d44a6 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 92185ab6797d..b75b8beed68f 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -604,8 +604,7 @@ static struct sk_buff
if (!skb)
return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- chcr_req = __skb_put(skb, transhdr_len);
- memset(chcr_req, 0, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
@@ -881,8 +880,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
return skb;
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- chcr_req = __skb_put(skb, transhdr_len);
- memset(chcr_req, 0, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
@@ -1447,8 +1445,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
/* Write WR */
- chcr_req = __skb_put(skb, transhdr_len);
- memset(chcr_req, 0, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
@@ -1779,8 +1776,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- chcr_req = __skb_put(skb, transhdr_len);
- memset(chcr_req, 0, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
@@ -1892,8 +1888,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
/* NIC driver is going to write the sge hdr. */
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- chcr_req = __skb_put(skb, transhdr_len);
- memset(chcr_req, 0, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
req->assoclen -= 8;
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index dc269cb288c2..951b6c79f166 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -47,7 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
-DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0400, DMI_PRODUCT_FAMILY);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY);
DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION);
@@ -192,7 +192,7 @@ static void __init dmi_id_init_attr_table(void)
ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
- ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
+ ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 93f7acdaac7a..783041964439 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
buf = dmi_early_remap(dmi_base, orig_dmi_len);
if (buf == NULL)
- return -1;
+ return -ENOMEM;
dmi_decode_table(buf, decode, NULL);
@@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
const char *d = (const char *) dm;
const char *p;
- if (dmi_ident[slot])
+ if (dmi_ident[slot] || dm->length <= string)
return;
p = dmi_string(dm, d[string]);
@@ -191,13 +191,14 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
int index)
{
- const u8 *d = (u8 *) dm + index;
+ const u8 *d;
char *s;
int is_ff = 1, is_00 = 1, i;
- if (dmi_ident[slot])
+ if (dmi_ident[slot] || dm->length <= index + 16)
return;
+ d = (u8 *) dm + index;
for (i = 0; i < 16 && (is_ff || is_00); i++) {
if (d[i] != 0x00)
is_00 = 0;
@@ -228,16 +229,17 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
static void __init dmi_save_type(const struct dmi_header *dm, int slot,
int index)
{
- const u8 *d = (u8 *) dm + index;
+ const u8 *d;
char *s;
- if (dmi_ident[slot])
+ if (dmi_ident[slot] || dm->length <= index)
return;
s = dmi_alloc(4);
if (!s)
return;
+ d = (u8 *) dm + index;
sprintf(s, "%u", *d & 0x7F);
dmi_ident[slot] = s;
}
@@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
{
- int i, count = *(u8 *)(dm + 1);
+ int i, count;
struct dmi_device *dev;
+ if (dm->length < 0x05)
+ return;
+
+ count = *(u8 *)(dm + 1);
for (i = 1; i <= count; i++) {
const char *devname = dmi_string(dm, i);
@@ -353,6 +359,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
const char *name;
const u8 *d = (u8 *)dm;
+ if (dm->length < 0x0B)
+ return;
+
/* Skip disabled device */
if ((d[0x5] & 0x80) == 0)
return;
@@ -387,7 +396,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
const char *d = (const char *)dm;
static int nr;
- if (dm->type != DMI_ENTRY_MEM_DEVICE)
+ if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
return;
if (nr >= dmi_memdev_nr) {
pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@@ -650,6 +659,21 @@ void __init dmi_scan_machine(void)
goto error;
/*
+ * Same logic as above, look for a 64-bit entry point
+ * first, and if not found, fall back to 32-bit entry point.
+ */
+ memcpy_fromio(buf, p, 16);
+ for (q = p + 16; q < p + 0x10000; q += 16) {
+ memcpy_fromio(buf + 16, q, 16);
+ if (!dmi_smbios3_present(buf)) {
+ dmi_available = 1;
+ dmi_early_unmap(p, 0x10000);
+ goto out;
+ }
+ memcpy(buf, buf + 16, 16);
+ }
+
+ /*
* Iterate over all possible DMI header addresses q.
* Maintain the 32 bytes around q in buf. On the
* first iteration, substitute zero for the
@@ -659,7 +683,7 @@ void __init dmi_scan_machine(void)
memset(buf, 0, 16);
for (q = p; q < p + 0x10000; q += 16) {
memcpy_fromio(buf + 16, q, 16);
- if (!dmi_smbios3_present(buf) || !dmi_present(buf)) {
+ if (!dmi_present(buf)) {
dmi_available = 1;
dmi_early_unmap(p, 0x10000);
goto out;
@@ -993,7 +1017,8 @@ EXPORT_SYMBOL(dmi_get_date);
* @decode: Callback function
* @private_data: Private data to be passed to the callback function
*
- * Returns -1 when the DMI table can't be reached, 0 on success.
+ * Returns 0 on success, -ENXIO if DMI is not selected or not present,
+ * or a different negative error code if DMI walking fails.
*/
int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data)
@@ -1001,11 +1026,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
u8 *buf;
if (!dmi_available)
- return -1;
+ return -ENXIO;
buf = dmi_remap(dmi_base, dmi_len);
if (buf == NULL)
- return -1;
+ return -ENOMEM;
dmi_decode_table(buf, decode, private_data);
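With dmi_walk() now returning distinct errno values instead of a bare -1, callers can tell "no DMI on this system" (-ENXIO) apart from a remap failure (-ENOMEM). A minimal sketch of a caller, assuming a hypothetical counting callback; the dmi_walk() prototype and struct dmi_header are the real ones from <linux/dmi.h>:

	#include <linux/dmi.h>
	#include <linux/errno.h>
	#include <linux/printk.h>

	/* Hypothetical callback: tally entries by SMBIOS type. */
	static void count_entries(const struct dmi_header *dh, void *priv)
	{
		int *counts = priv;

		if (dh->type < 128)
			counts[dh->type]++;
	}

	static int example_scan(void)
	{
		int counts[128] = { 0 };
		int ret = dmi_walk(count_entries, counts);

		if (ret == -ENXIO)
			pr_info("no DMI table on this system\n");
		else if (ret)
			pr_err("DMI walk failed: %d\n", ret);
		return ret;
	}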
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 5104b6398139..c83ea68be792 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
u32 set;
if (!of_device_is_compatible(mvchip->chip.of_node,
- "marvell,armada-370-xp-gpio"))
+ "marvell,armada-370-gpio"))
return 0;
if (IS_ERR(mvchip->clk))
@@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
.data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
},
{
- .compatible = "marvell,armada-370-xp-gpio",
+ .compatible = "marvell,armada-370-gpio",
.data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
},
{
@@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip);
}
- /* Armada 370/XP has simple PWM support for GPIO lines */
+ /* Some MVEBU SoCs have simple PWM support for GPIO lines */
if (IS_ENABLED(CONFIG_PWM))
return mvebu_pwm_probe(pdev, mvchip, id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1cf78f4dd339..1e8e1123ddf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000;
+ } else if (adev->clock.default_dispclk <= 60000) {
+ DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+ adev->clock.default_dispclk / 100);
+ adev->clock.default_dispclk = 62500;
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f2d705e6a75a..ab6b0d0febab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
/* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 8c9bc75a9c2d..8a0818b23ea4 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 0cdeb6a2e4a0..5dffa27afa45 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 773654a19749..47bbc87f96d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 1f3552967ba3..d8c9a959493e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -983,8 +983,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3c558c170e5e..db30c6ba563a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
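The same arithmetic change appears in all four DCE variants above (and again in the radeon hunks below): on a 32-bit kernel, 1000000UL * crtc_htotal is a 32-bit multiply, so the product wraps once crtc_htotal exceeds 4294 and the watermark line time comes out wrong for large modes. Widening to u64 and dividing with div_u64() keeps the intermediate exact. A standalone sketch, assuming illustrative mode values:

	#include <linux/math64.h>
	#include <linux/types.h>

	static u32 line_time_ns(u32 crtc_htotal, u32 clock_khz)
	{
		/*
		 * 1000000UL * crtc_htotal wraps on 32-bit once
		 * crtc_htotal > 4294; widen to u64 before multiplying.
		 */
		return (u32)div_u64((u64)crtc_htotal * 1000000, clock_khz);
	}

	/*
	 * e.g. a 4K mode: htotal 4400 @ 594000 kHz -> 7407 (ns); the
	 * 32-bit product 4400000000 would already have wrapped.
	 */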
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 40d2827a6d19..53e78d092d18 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,6 +1,7 @@
config DRM_DW_HDMI
tristate
select DRM_KMS_HELPER
+ select REGMAP_MMIO
config DRM_DW_HDMI_AHB_AUDIO
tristate "Synopsys Designware AHB Audio interface"
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9f847615ac74..48ca2457df8c 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector)
return -ENOENT;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- encoder = drm_connector_get_encoder(connector);
- if (encoder)
- out_resp->encoder_id = encoder->base.id;
- else
- out_resp->encoder_id = 0;
-
- ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
- (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
- (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
- &out_resp->count_props);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- if (ret)
- goto out_unref;
-
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
if (connector->encoder_ids[i] != 0)
encoders_count++;
@@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (put_user(connector->encoder_ids[i],
encoder_ptr + copied)) {
ret = -EFAULT;
- goto out_unref;
+ goto out;
}
copied++;
}
@@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
ret = -EFAULT;
+ mutex_unlock(&dev->mode_config.mutex);
+
goto out;
}
copied++;
}
}
out_resp->count_modes = mode_count;
-out:
mutex_unlock(&dev->mode_config.mutex);
-out_unref:
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ encoder = drm_connector_get_encoder(connector);
+ if (encoder)
+ out_resp->encoder_id = encoder->base.id;
+ else
+ out_resp->encoder_id = 0;
+
+ /* Only grab properties after probing, to make sure EDID and other
+ * properties reflect the latest status. */
+ ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+ (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+ (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+ &out_resp->count_props);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+out:
drm_connector_put(connector);
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index c4a091e87426..e437fba1209d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -106,9 +106,10 @@ struct etnaviv_gem_submit {
struct etnaviv_gpu *gpu;
struct ww_acquire_ctx ticket;
struct dma_fence *fence;
+ u32 flags;
unsigned int nr_bos;
struct etnaviv_gem_submit_bo bos[0];
- u32 flags;
+ /* No new members here, the previous one is variable-length! */
};
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
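The etnaviv layout fix above is the usual flexible-array rule: bos[0] is a variable-length tail, so a member declared after it (here, flags) shares storage with the array elements and is clobbered by the first bo written. A minimal sketch of the safe layout and matching allocation, using stand-in names:

	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_submit {
		u32 flags;		/* fixed members first */
		unsigned int nr_bos;
		struct demo_bo {
			u32 bo_flags;
		} bos[];		/* variable tail: nothing may follow */
	};

	static struct demo_submit *demo_submit_alloc(unsigned int nr)
	{
		struct demo_submit *s;

		s = kzalloc(sizeof(*s) + nr * sizeof(s->bos[0]), GFP_KERNEL);
		if (s)
			s->nr_bos = nr;
		return s;
	}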
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index de80ee1b71df..1013765274da 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -172,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
- bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
+ bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
explicit);
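The one-character change above is easy to misread: !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT) is true when the flag is absent, inverting the requested behaviour, whereas !! merely normalises the masked bit to 0/1. A standalone sketch with a stand-in flag name:

	#include <linux/types.h>

	#define DEMO_NO_IMPLICIT	0x1

	static bool wants_explicit_sync(u32 flags)
	{
		/*
		 * !!(x) folds a masked bit to 0/1; a single ! would
		 * invert it, which was the bug fixed above.
		 */
		return !!(flags & DEMO_NO_IMPLICIT);
	}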
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d689e511744e..4bd1467c17b1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)
struct file_stats *stats = data;
struct i915_vma *vma;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
stats->count++;
stats->total += obj->base.size;
if (!obj->bind_count)
@@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct drm_i915_gem_request *request;
struct task_struct *task;
+ mutex_lock(&dev->struct_mutex);
+
memset(&stats, 0, sizeof(stats));
stats.file_priv = file->driver_priv;
spin_lock(&file->table_lock);
@@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
* still alive (e.g. get_pid(current) => fork() => exit()).
* Therefore, we need to protect this ->comm access using RCU.
*/
- mutex_lock(&dev->struct_mutex);
request = list_first_entry_or_null(&file_priv->mm.request_list,
struct drm_i915_gem_request,
client_link);
@@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
PIDTYPE_PID);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
+
mutex_unlock(&dev->struct_mutex);
}
mutex_unlock(&dev->filelist_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 462031cbd77f..615f0a855222 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment;
+ gfp_t noreclaim;
int ret;
- gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2315,22 +2315,31 @@ rebuild_st:
* Fail silently without starting the shrinker
*/
mapping = obj->base.filp->f_mapping;
- gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
- gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ noreclaim = mapping_gfp_constraint(mapping,
+ ~(__GFP_IO | __GFP_RECLAIM));
+ noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
+
sg = st->sgl;
st->nents = 0;
for (i = 0; i < page_count; i++) {
- page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (unlikely(IS_ERR(page))) {
- i915_gem_shrink(dev_priv,
- page_count,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE);
+ const unsigned int shrink[] = {
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
+ 0,
+ }, *s = shrink;
+ gfp_t gfp = noreclaim;
+
+ do {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- }
- if (unlikely(IS_ERR(page))) {
- gfp_t reclaim;
+ if (likely(!IS_ERR(page)))
+ break;
+
+ if (!*s) {
+ ret = PTR_ERR(page);
+ goto err_sg;
+ }
+
+ i915_gem_shrink(dev_priv, 2 * page_count, *s++);
+ cond_resched();
/* We've tried hard to allocate the memory by reaping
* our own buffer, now let the real VM do its job and
@@ -2340,15 +2349,26 @@ rebuild_st:
* defer the oom here by reporting the ENOMEM back
* to userspace.
*/
- reclaim = mapping_gfp_mask(mapping);
- reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
-
- page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto err_sg;
+ if (!*s) {
+ /* reclaim and warn, but no oom */
+ gfp = mapping_gfp_mask(mapping);
+
+ /* Our bo are always dirty and so we require
+ * kswapd to reclaim our pages (direct reclaim
+ * does not effectively begin pageout of our
+ * buffers on its own). However, direct reclaim
+ * only waits for kswapd when under allocation
+ * congestion. So as a result __GFP_RECLAIM is
+ * unreliable and fails to actually reclaim our
+ * dirty pages -- unless you try over and over
+ * again with !__GFP_NORETRY. However, we still
+ * want to fail this allocation rather than
+ * trigger the out-of-memory killer and for
+ * this we want the future __GFP_MAYFAIL.
+ */
}
- }
+ } while (1);
+
if (!i ||
sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) {
@@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
mapping = obj->base.filp->f_mapping;
mapping_set_gfp_mask(mapping, mask);
+ GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
i915_gem_object_init(obj, &i915_gem_object_ops);
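The rebuilt shmem loop above is a staged-fallback allocator: try the cheap non-reclaiming gfp first, shrink the driver's own buffers on failure, and only switch to the mapping's full gfp (which may reclaim, but must not oom) once every shrink stage has been spent. A condensed sketch of the control flow, with stand-in helpers in place of the i915 internals:

	#include <linux/gfp.h>
	#include <linux/err.h>
	#include <linux/sched.h>

	#define DEMO_SHRINK_ALL		0x3	/* stand-in stage flags */

	struct page *demo_try_alloc(gfp_t gfp);	/* stand-in: ERR_PTR on failure */
	void demo_shrink(unsigned int flags);	/* stand-in shrinker */

	static struct page *demo_alloc_staged(gfp_t noreclaim, gfp_t full)
	{
		const unsigned int stages[] = { DEMO_SHRINK_ALL, 0 }, *s = stages;
		gfp_t gfp = noreclaim;
		struct page *page;

		do {
			page = demo_try_alloc(gfp);
			if (!IS_ERR(page))
				return page;

			if (!*s)		/* all fallbacks spent */
				return page;	/* still an ERR_PTR */

			demo_shrink(*s++);
			cond_resched();

			if (!*s)		/* final pass: allow real reclaim */
				gfp = full;
		} while (1);
	}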
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3e59c8ef27b..9ad13eeed904 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -546,11 +546,12 @@ repeat:
}
static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct reloc_cache *cache)
{
+ struct drm_i915_gem_object *obj = vma->obj;
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
@@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -EINVAL;
}
+ /*
+ * If we write into the object, we need to force the synchronisation
+ * barrier, either with an asynchronous clflush or if we executed the
+ * patching using the GPU (though that should be serialised by the
+ * timeline). To be completely sure, and since we are required to
+ * do relocations we are already stalling, disable the user's opt
+	 * out of our synchronisation.
+ */
+ vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
+
ret = relocate_entry(obj, reloc, cache, target_offset);
if (ret)
return ret;
@@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
+ ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
if (ret)
goto out;
@@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
reloc_cache_init(&cache, eb->i915);
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
+ ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
if (ret)
break;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 5ddbc9499775..a74d0ac737cb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- req->head = req->ring->tail;
+ req->head = req->ring->emit;
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 1642fff9cf13..ab5140ba108d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
GEM_BUG_ON(freespace < wqi_size);
/* The GuC firmware wants the tail index in QWords, not bytes */
- tail = rq->tail;
- assert_ring_tail_valid(rq->ring, rq->tail);
- tail >>= 3;
+ tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index c0cb2974caac..2cfe96d3e5d1 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -36,10 +36,6 @@
#define VGT_VERSION_MAJOR 1
#define VGT_VERSION_MINOR 0
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
- INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
/*
* notifications from guest to vgpu device model
*/
@@ -55,8 +51,8 @@ enum vgt_g2v_type {
struct vgt_if {
u64 magic; /* VGT_MAGIC */
- uint16_t version_major;
- uint16_t version_minor;
+ u16 version_major;
+ u16 version_minor;
u32 vgt_id; /* ID of vGT instance */
u32 rsv1[12]; /* pad to offset 0x40 */
/*
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 4ab8a973b61f..2e739018fb4c 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,8 +60,8 @@
*/
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
- uint64_t magic;
- uint32_t version;
+ u64 magic;
+ u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
@@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
if (magic != VGT_MAGIC)
return;
- version = INTEL_VGT_IF_VERSION_ENCODE(
- __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
- __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
- if (version != INTEL_VGT_IF_VERSION) {
+ version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+ if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 1aba47024656..f066e2d785f5 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma)
break;
}
+ if (!ret) {
+ ret = i915_gem_active_retire(&vma->last_fence,
+ &vma->vm->i915->drm.struct_mutex);
+ }
+
__i915_vma_unpin(vma);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 569717a12723..9106ea32b048 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
-static void intel_modeset_setup_hw_state(struct drm_device *dev);
+static void intel_modeset_setup_hw_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
struct intel_limit {
@@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
struct drm_crtc *crtc;
int i, ret;
- intel_modeset_setup_hw_state(dev);
+ intel_modeset_setup_hw_state(dev, ctx);
i915_redisable_vga(to_i915(dev));
if (!state)
@@ -4598,7 +4599,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
- unsigned scaler_user, int *scaler_id, unsigned int rotation,
+ unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h)
{
struct intel_crtc_scaler_state *scaler_state =
@@ -4607,9 +4608,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
- need_scaling = drm_rotation_90_or_270(rotation) ?
- (src_h != dst_w || src_w != dst_h):
- (src_w != dst_w || src_h != dst_h);
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ need_scaling = src_w != dst_w || src_h != dst_h;
/*
* if plane is being disabled or scaler is no more required or force detach
@@ -4671,7 +4675,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id, DRM_ROTATE_0,
+ &state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
@@ -4700,7 +4704,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
- plane_state->base.rotation,
drm_rect_width(&plane_state->base.src) >> 16,
drm_rect_height(&plane_state->base.src) >> 16,
drm_rect_width(&plane_state->base.dst),
@@ -5823,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_update_watermarks(intel_crtc);
}
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5853,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
return;
}
- state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+ state->acquire_ctx = ctx;
/* Everything's already locked, -EDEADLK can't happen. */
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@@ -15028,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev_priv);
drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev);
+ intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
@@ -15065,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
return 0;
}
-static void intel_enable_pipe_a(struct drm_device *dev)
+static void intel_enable_pipe_a(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
- struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
int ret;
/* We can't just switch on the pipe A, we need to set things up with a
@@ -15143,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}
-static void intel_sanitize_crtc(struct intel_crtc *crtc)
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15189,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
plane = crtc->plane;
crtc->base.primary->state->visible = true;
crtc->plane = !plane;
- intel_crtc_disable_noatomic(&crtc->base);
+ intel_crtc_disable_noatomic(&crtc->base, ctx);
crtc->plane = plane;
}
@@ -15199,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
	 * resume. Force-enable the pipe to fix this; in the update_dpms
	 * call below we restore the pipe to the right state, but leave
* the required bits on. */
- intel_enable_pipe_a(dev);
+ intel_enable_pipe_a(dev, ctx);
}
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
if (crtc->active && !intel_crtc_has_encoders(crtc))
- intel_crtc_disable_noatomic(&crtc->base);
+ intel_crtc_disable_noatomic(&crtc->base, ctx);
if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
/*
@@ -15503,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
* and sanitizes it to the current state
*/
static void
-intel_modeset_setup_hw_state(struct drm_device *dev)
+intel_modeset_setup_hw_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
@@ -15523,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- intel_sanitize_crtc(crtc);
+ intel_sanitize_crtc(crtc, ctx);
intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]");
}
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 6532e226db29..40ba3134545e 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
struct intel_panel *panel = &connector->panel;
- intel_dp_aux_enable_backlight(connector);
-
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
panel->backlight.max = 0xFFFF;
else
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index dac4e003c1f3..62f44d3e7c43 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
- assert_ring_tail_valid(rq->ring, rq->tail);
- reg_state[CTX_RING_TAIL+1] = rq->tail;
+ reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
/* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
@@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
ce->state->obj->mm.dirty = true;
i915_gem_object_unpin_map(ce->state->obj);
- ce->ring->head = ce->ring->tail = 0;
- intel_ring_update_space(ce->ring);
+ intel_ring_reset(ce->ring, 0);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2ca481b5aa69..078fd1bfa5ea 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
/* n.b., src is 16.16 fixed point, dst is whole integer */
if (plane->id == PLANE_CURSOR) {
+ /*
+ * Cursors only support 0/180 degree rotation,
+ * hence no need to account for rotation here.
+ */
src_w = pstate->base.src_w;
src_h = pstate->base.src_h;
dst_w = pstate->base.crtc_w;
dst_h = pstate->base.crtc_h;
} else {
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
src_w = drm_rect_width(&pstate->base.src);
src_h = drm_rect_height(&pstate->base.src);
dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst);
}
- if (drm_rotation_90_or_270(pstate->base.rotation))
- swap(dst_w, dst_h);
-
downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
@@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
if (y && format != DRM_FORMAT_NV12)
return 0;
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (drm_rotation_90_or_270(pstate->rotation))
- swap(width, height);
-
/* for planar format */
if (format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
@@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 8;
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (drm_rotation_90_or_270(pstate->rotation))
- swap(src_w, src_h);
-
/* Halve UV plane width and height for NV12 */
if (fb->format->format == DRM_FORMAT_NV12 && !y) {
src_w /= 2;
@@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
width = intel_pstate->base.crtc_w;
height = intel_pstate->base.crtc_h;
} else {
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
}
- if (drm_rotation_90_or_270(pstate->rotation))
- swap(width, height);
-
cpp = fb->format->cpp[0];
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 66a2b8b83972..513a0f4b469b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
void intel_ring_update_space(struct intel_ring *ring)
{
- ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
+ ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
}
static int
@@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
i915_gem_request_submit(request);
- assert_ring_tail_valid(request->ring, request->tail);
- I915_WRITE_TAIL(request->engine, request->tail);
+ I915_WRITE_TAIL(request->engine,
+ intel_ring_set_tail(request->ring, request->tail));
}
static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
@@ -1316,11 +1316,23 @@ err:
return PTR_ERR(addr);
}
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+ GEM_BUG_ON(!list_empty(&ring->request_list));
+ ring->tail = tail;
+ ring->head = tail;
+ ring->emit = tail;
+ intel_ring_update_space(ring);
+}
+
void intel_ring_unpin(struct intel_ring *ring)
{
GEM_BUG_ON(!ring->vma);
GEM_BUG_ON(!ring->vaddr);
+ /* Discard any unused bytes beyond that submitted to hw. */
+ intel_ring_reset(ring, ring->tail);
+
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
@@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ /* Restart from the beginning of the rings for convenience */
for_each_engine(engine, dev_priv, id)
- engine->buffer->head = engine->buffer->tail;
+ intel_ring_reset(engine->buffer, 0);
}
static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
unsigned space;
/* Would completion of this request free enough space? */
- space = __intel_ring_space(target->postfix, ring->tail,
+ space = __intel_ring_space(target->postfix, ring->emit,
ring->size);
if (space >= bytes)
break;
@@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
struct intel_ring *ring = req->ring;
- int remain_actual = ring->size - ring->tail;
- int remain_usable = ring->effective_size - ring->tail;
+ int remain_actual = ring->size - ring->emit;
+ int remain_usable = ring->effective_size - ring->emit;
int bytes = num_dwords * sizeof(u32);
int total_bytes, wait_bytes;
bool need_wrap = false;
@@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
if (unlikely(need_wrap)) {
GEM_BUG_ON(remain_actual > ring->space);
- GEM_BUG_ON(ring->tail + remain_actual > ring->size);
+ GEM_BUG_ON(ring->emit + remain_actual > ring->size);
/* Fill the tail with MI_NOOP */
- memset(ring->vaddr + ring->tail, 0, remain_actual);
- ring->tail = 0;
+ memset(ring->vaddr + ring->emit, 0, remain_actual);
+ ring->emit = 0;
ring->space -= remain_actual;
}
- GEM_BUG_ON(ring->tail > ring->size - bytes);
- cs = ring->vaddr + ring->tail;
- ring->tail += bytes;
+ GEM_BUG_ON(ring->emit > ring->size - bytes);
+ cs = ring->vaddr + ring->emit;
+ ring->emit += bytes;
ring->space -= bytes;
GEM_BUG_ON(ring->space < 0);
@@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
int num_dwords =
- (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
u32 *cs;
if (num_dwords == 0)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a82a0807f64d..f7144fe09613 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -145,6 +145,7 @@ struct intel_ring {
u32 head;
u32 tail;
+ u32 emit;
int space;
int size;
@@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+void intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);
@@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
- GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
+ GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}
static inline u32
@@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
GEM_BUG_ON(tail >= ring->size);
}
-void intel_ring_update_space(struct intel_ring *ring);
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+	/* Whilst writes to the tail are strictly ordered, there is no
+ * serialisation between readers and the writers. The tail may be
+ * read by i915_gem_request_retire() just as it is being updated
+ * by execlists, as although the breadcrumb is complete, the context
+ * switch hasn't been seen.
+ */
+ assert_ring_tail_valid(ring, tail);
+ ring->tail = tail;
+ return tail;
+}
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index adb411a078e8..f4b53588e071 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (IS_G200_SE(mdev)) {
- if (mdev->unique_rev_id >= 0x02) {
+ if (mdev->unique_rev_id >= 0x04) {
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+ WREG8(MGAREG_CRTCEXT_DATA, 0);
+ } else if (mdev->unique_rev_id >= 0x02) {
u8 hi_pri_lvl;
u32 bpp;
u32 mb;
@@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
if (mga_vga_calculate_mode_bandwidth(mode, bpp)
> (30100 * 1024))
return MODE_BANDWIDTH;
+ } else {
+ if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (55000 * 1024))
+ return MODE_BANDWIDTH;
}
} else if (mdev->type == G200_WB) {
if (mode->hdisplay > 1280)
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 1144e0c9e894..0abe77675b76 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -35,6 +35,13 @@
#include "mxsfb_drv.h"
#include "mxsfb_regs.h"
+#define MXS_SET_ADDR 0x4
+#define MXS_CLR_ADDR 0x8
+#define MODULE_CLKGATE BIT(30)
+#define MODULE_SFTRST BIT(31)
+/* 1 second delay should be plenty of time for block reset */
+#define RESET_TIMEOUT 1000000
+
static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
{
return (val & mxsfb->devdata->hs_wdth_mask) <<
@@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
clk_disable_unprepare(mxsfb->clk_disp_axi);
}
+/*
+ * Clear the bit and poll until it reads back as cleared. This is
+ * usually called with a reset address and a mask of either SFTRST
+ * (bit 31) or CLKGATE (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ u32 reg;
+
+ writel(mask, addr + MXS_CLR_ADDR);
+ return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
+}
+
+static int mxsfb_reset_block(void __iomem *reset_addr)
+{
+ int ret;
+
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (ret)
+ return ret;
+
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (ret)
+ return ret;
+
+ return clear_poll_bit(reset_addr, MODULE_CLKGATE);
+}
+
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
{
struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
@@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
*/
mxsfb_enable_axi_clk(mxsfb);
+ /* Mandatory eLCDIF reset as per the Reference Manual */
+ err = mxsfb_reset_block(mxsfb->base);
+ if (err)
+ return;
+
/* Clear the FIFOs */
writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 008c145b7f29..ca44233ceacc 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9267,8 +9267,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
u32 tmp, wm_mask;
if (radeon_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0bf103536404..534637203e70 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2266,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
fixed20_12 a, b, c;
if (radeon_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
dram_channels = evergreen_get_number_of_dram_channels(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 432480ff9d22..3178ba0c537c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x280a)
return;
+	/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS400 &&
+ rdev->pdev->subsystem_vendor == 0x1179 &&
+ rdev->pdev->subsystem_device == 0xff31)
+ return;
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6ecf42783d4b..0a6444d72000 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+ * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+ */
+ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 7431eb4a11b7..d34d1cf33895 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
}
/* TODO: is this still necessary on NI+ ? */
- if ((cmd == 0 || cmd == 1 || cmd == 0x3) &&
+ if ((cmd == 0 || cmd == 0x3) &&
(start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
start, end);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 76d1888528e6..5303f25d5280 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
fixed20_12 a, b, c;
if (radeon_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 9a1e34e48f64..81f86a67c10d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -451,18 +451,6 @@ fail:
#ifdef CONFIG_DRM_TEGRA_STAGING
-static struct tegra_drm_context *
-tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
-{
- struct tegra_drm_context *context;
-
- mutex_lock(&file->lock);
- context = idr_find(&file->contexts, id);
- mutex_unlock(&file->lock);
-
- return context;
-}
-
static int tegra_gem_create(struct drm_device *drm, void *data,
struct drm_file *file)
{
@@ -551,7 +539,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
if (err < 0)
return err;
- err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
+ err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
if (err < 0) {
client->ops->close_channel(context);
return err;
@@ -606,7 +594,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock);
- context = tegra_drm_file_get_context(fpriv, args->context);
+ context = idr_find(&fpriv->contexts, args->context);
if (!context) {
err = -EINVAL;
goto unlock;
@@ -631,7 +619,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock);
- context = tegra_drm_file_get_context(fpriv, args->context);
+ context = idr_find(&fpriv->contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
@@ -660,7 +648,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock);
- context = tegra_drm_file_get_context(fpriv, args->context);
+ context = idr_find(&fpriv->contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
@@ -685,7 +673,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock);
- context = tegra_drm_file_get_context(fpriv, args->context);
+ context = idr_find(&fpriv->contexts, args->context);
if (!context) {
err = -ENODEV;
goto unlock;
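Replacing the locked helper with a bare idr_find() works because every caller already holds fpriv->lock across both the lookup and the use of the context; together with starting idr_alloc() at 1, a context ID of 0 can never name a live object. A minimal sketch of the allocation side:

	#include <linux/gfp.h>
	#include <linux/idr.h>

	static int demo_store(struct idr *idr, void *obj)
	{
		/*
		 * start = 1, end = 0 (no upper bound): IDs begin at 1,
		 * so 0 is never handed out and can mean "no context".
		 */
		return idr_alloc(idr, obj, 1, 0, GFP_KERNEL);
	}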
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 13db8a2851ed..1f013d45c9e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
+ drm_ht_remove(&man->resources);
kfree(man);
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f05ebb14fa63..ac65f52850a6 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev)
host->rst = devm_reset_control_get(&pdev->dev, "host1x");
if (IS_ERR(host->rst)) {
- err = PTR_ERR(host->clk);
+ err = PTR_ERR(host->rst);
dev_err(&pdev->dev, "failed to get reset: %d\n", err);
return err;
}
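The host1x change is a copy-and-paste classic: the IS_ERR() test and the PTR_ERR() extraction must name the same variable, or the error code is read out of the wrong (and here perfectly valid) pointer. A minimal sketch of the intended pattern:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	static int demo_get_reset(struct device *dev)
	{
		struct reset_control *rst;

		rst = devm_reset_control_get(dev, "host1x");
		if (IS_ERR(rst))
			return PTR_ERR(rst);	/* same variable as the check */
		return 0;
	}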
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 8ca1e8ce0af2..4f9a3938189a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -319,6 +319,9 @@
#define USB_VENDOR_ID_DELCOM 0x0fc5
#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080
+#define USB_VENDOR_ID_DELL 0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
+
#define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 1d6c997b3001..20b40ad26325 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -349,7 +349,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
magicmouse_emit_buttons(msc, clicks & 3);
- input_mt_report_pointer_emulation(input, true);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -389,16 +388,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__clear_bit(BTN_RIGHT, input->keybit);
__clear_bit(BTN_MIDDLE, input->keybit);
__set_bit(BTN_MOUSE, input->keybit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+ __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+ __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+ __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
- __set_bit(BTN_TOOL_FINGER, input->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
- __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
- __set_bit(BTN_TOOL_QUADTAP, input->keybit);
- __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
- __set_bit(BTN_TOUCH, input->keybit);
- __set_bit(INPUT_PROP_POINTER, input->propbit);
__set_bit(EV_ABS, input->evbit);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 6316498b7812..a88e7c7bea0a 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -85,6 +85,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 95ed17183e73..54a47b40546f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
	 * the first read operation, otherwise the first read costs
* one extra clock cycle.
*/
- temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_MTX;
- writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
}
msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
@@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
	 * the first read operation, otherwise the first read costs
* one extra clock cycle.
*/
- temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_MTX;
- writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
}
} else if (i == (msgs->len - 2)) {
dev_dbg(&i2c_imx->adapter.dev,
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index f573448d2132..e98e44e584a4 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -584,7 +584,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
/* unmap the data buffer */
if (dma_size != 0)
- dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction);
+ dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
if (unlikely(!time_left)) {
dev_err(dev, "completion wait timed out\n");
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 214bf2835d1f..8be3e6cb8fe6 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -319,7 +319,7 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICFBSCR, TCYC06);
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
- priv->msg->len, priv->dma_direction);
+ sg_dma_len(&priv->sg), priv->dma_direction);
priv->dma_direction = DMA_NONE;
}
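The r-car change follows the DMA-API rule that dma_unmap_single() must be given the same length that was mapped; sg_dma_len() records exactly that, while priv->msg->len can differ when only part of the message is transferred by DMA. A minimal sketch:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static void demo_unmap(struct device *dev, struct scatterlist *sg,
			       enum dma_data_direction dir)
	{
		/* unmap with the mapped length, not the logical one */
		dma_unmap_single(dev, sg_dma_address(sg), sg_dma_len(sg), dir);
	}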
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index dd4190b50df6..6066bbfc42fe 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -468,13 +468,13 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev)
static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- int count;
+ unsigned int count, tmp;
for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) {
if (!meson_sar_adc_get_fifo_count(indio_dev))
break;
- regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0);
+ regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp);
}
}
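The meson change fixes a regmap_read() call that passed a literal 0 where a pointer is expected: the third argument is written through, so it must point at real storage even when the value is only read to drain the FIFO. A minimal sketch:

	#include <linux/regmap.h>

	static void demo_drain_one(struct regmap *map, unsigned int reg)
	{
		unsigned int discard;

		/* regmap_read() stores through its third argument */
		regmap_read(map, reg, &discard);
	}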
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index b0c7d8ee5cb8..6888167ca1e6 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -718,9 +718,12 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
adc->dev = dev;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
+
adc->base = devm_ioremap(dev, iores->start, resource_size(iores));
- if (IS_ERR(adc->base))
- return PTR_ERR(adc->base);
+ if (!adc->base)
+ return -ENOMEM;
init_completion(&adc->completion);
spin_lock_init(&adc->lock);
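The IS_ERR()/PTR_ERR() pattern removed above is correct for devm_ioremap_resource(), but devm_ioremap() signals failure with NULL, so the old check treated a failed mapping as success; the fix also validates platform_get_resource() before dereferencing it. A minimal sketch of the corrected probe sequence:

	#include <linux/platform_device.h>
	#include <linux/io.h>

	static int demo_probe_iomem(struct platform_device *pdev,
				    void __iomem **base)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -EINVAL;

		/* devm_ioremap() returns NULL on failure, not ERR_PTR */
		*base = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
		if (!*base)
			return -ENOMEM;
		return 0;
	}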
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index dd99d273bae9..ff03324dee13 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 9fabed47053d..2b5a320f42c5 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -14,6 +14,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 96dabbd2f004..88a7c5d4e4d2 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785};
static const struct inv_mpu6050_reg_map reg_set_6500 = {
.sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
.lpf = INV_MPU6050_REG_CONFIG,
+ .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2,
.user_ctrl = INV_MPU6050_REG_USER_CTRL,
.fifo_en = INV_MPU6050_REG_FIFO_EN,
.gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
@@ -211,6 +212,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
/**
+ * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
+ *
+ * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope
+ * MPU6500 and above have a dedicated register for accelerometer
+ */
+static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
+ enum inv_mpu6050_filter_e val)
+{
+ int result;
+
+ result = regmap_write(st->map, st->reg->lpf, val);
+ if (result)
+ return result;
+
+ switch (st->chip_type) {
+ case INV_MPU6050:
+ case INV_MPU6000:
+ case INV_MPU9150:
+ /* old chips, nothing to do */
+ result = 0;
+ break;
+ default:
+ /* set accel lpf */
+ result = regmap_write(st->map, st->reg->accel_lpf, val);
+ break;
+ }
+
+ return result;
+}
+
+/**
* inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
*
* Initial configuration:
@@ -233,8 +265,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
if (result)
return result;
- d = INV_MPU6050_FILTER_20HZ;
- result = regmap_write(st->map, st->reg->lpf, d);
+ result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
if (result)
return result;
@@ -537,6 +568,8 @@ error_write_raw:
* would be alising. This function basically search for the
* correct low pass parameters based on the fifo rate, e.g,
* sampling frequency.
+ *
+ * lpf is set automatically when setting sampling rate to avoid any aliases.
*/
static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
{
@@ -552,7 +585,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
i++;
data = d[i];
- result = regmap_write(st->map, st->reg->lpf, data);
+ result = inv_mpu6050_set_lpf_regs(st, data);
if (result)
return result;
st->chip_config.lpf = data;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index ef13de7a2c20..953a0c09d568 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -28,6 +28,7 @@
* struct inv_mpu6050_reg_map - Notable registers.
* @sample_rate_div: Divider applied to gyro output rate.
* @lpf: Configures internal low pass filter.
+ * @accel_lpf: Configures accelerometer low pass filter.
* @user_ctrl: Enables/resets the FIFO.
* @fifo_en: Determines which data will appear in FIFO.
* @gyro_config: gyro config register.
@@ -47,6 +48,7 @@
struct inv_mpu6050_reg_map {
u8 sample_rate_div;
u8 lpf;
+ u8 accel_lpf;
u8 user_ctrl;
u8 fifo_en;
u8 gyro_config;
@@ -188,6 +190,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_FIFO_THRESHOLD 500
/* mpu6500 registers */
+#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
#define INV_MPU6500_REG_ACCEL_OFFSET 0x77
/* delay time in milliseconds */
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index d2957b38575f..a6cb379a4ebc 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -448,12 +448,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
return ret;
rt = (struct rt6_info *)dst;
- if (ipv6_addr_any(&fl6.saddr)) {
- ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
- &fl6.daddr, 0, &fl6.saddr);
- if (ret)
- goto put;
-
+ if (ipv6_addr_any(&src_in->sin6_addr)) {
src_in->sin6_family = AF_INET6;
src_in->sin6_addr = fl6.saddr;
}
@@ -470,9 +465,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
*pdst = dst;
return 0;
-put:
- dst_release(dst);
- return ret;
}
#else
static int addr6_resolve(struct sockaddr_in6 *src_in,
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ebf7be8d4139..08772836fded 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -56,6 +56,10 @@
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_UD_QP_HW_STALL 0x400000
+
+#define BNXT_RE_RQ_WQE_THRESHOLD 32
+
struct bnxt_re_work {
struct work_struct work;
unsigned long event;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 7ba9e699d7ab..c7bd68311d0c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -61,6 +61,48 @@
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
+static int __from_ib_access_flags(int iflags)
+{
+ int qflags = 0;
+
+ if (iflags & IB_ACCESS_LOCAL_WRITE)
+ qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_READ)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
+ if (iflags & IB_ACCESS_REMOTE_WRITE)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_ATOMIC)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
+ if (iflags & IB_ACCESS_MW_BIND)
+ qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
+ if (iflags & IB_ZERO_BASED)
+ qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
+ if (iflags & IB_ACCESS_ON_DEMAND)
+ qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
+ return qflags;
+};
+
+static enum ib_access_flags __to_ib_access_flags(int qflags)
+{
+ enum ib_access_flags iflags = 0;
+
+ if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
+ iflags |= IB_ACCESS_LOCAL_WRITE;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
+ iflags |= IB_ACCESS_REMOTE_WRITE;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
+ iflags |= IB_ACCESS_REMOTE_READ;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
+ iflags |= IB_ACCESS_REMOTE_ATOMIC;
+ if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
+ iflags |= IB_ACCESS_MW_BIND;
+ if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
+ iflags |= IB_ZERO_BASED;
+ if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
+ iflags |= IB_ACCESS_ON_DEMAND;
+ return iflags;
+};
+
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
struct bnxt_qplib_sge *sg_list, int num)
{
@@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_total_mcast_qp_attach = 0;
ib_attr->max_ah = dev_attr->max_ah;
- ib_attr->max_fmr = dev_attr->max_fmr;
- ib_attr->max_map_per_fmr = 1; /* ? */
+ ib_attr->max_fmr = 0;
+ ib_attr->max_map_per_fmr = 0;
ib_attr->max_srq = dev_attr->max_srq;
ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
@@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET;
}
+#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
+
+static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct ib_mr *ib_mr = &fence->mr->ib_mr;
+ struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+
+ memset(wqe, 0, sizeof(*wqe));
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
+ wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ wqe->bind.zero_based = false;
+ wqe->bind.parent_l_key = ib_mr->lkey;
+ wqe->bind.va = (u64)(unsigned long)fence->va;
+ wqe->bind.length = fence->size;
+ wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
+ wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
+
+ /* Save the initial rkey in fence structure for now;
+ * wqe->bind.r_key will be set at (re)bind time.
+ */
+ fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
+}
+
+static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
+{
+ struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
+ qplib_qp);
+ struct ib_pd *ib_pd = qp->ib_qp.pd;
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
+ struct bnxt_qplib_swqe wqe;
+ int rc;
+
+ memcpy(&wqe, fence_wqe, sizeof(wqe));
+ wqe.bind.r_key = fence->bind_rkey;
+ fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
+
+ dev_dbg(rdev_to_dev(qp->rdev),
+ "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
+ wqe.bind.r_key, qp->qplib_qp.id, pd);
+ rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ if (rc) {
+ dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
+ return rc;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+
+ return rc;
+}
+
+static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct device *dev = &rdev->en_dev->pdev->dev;
+ struct bnxt_re_mr *mr = fence->mr;
+
+ if (fence->mw) {
+ bnxt_re_dealloc_mw(fence->mw);
+ fence->mw = NULL;
+ }
+ if (mr) {
+ if (mr->ib_mr.rkey)
+ bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
+ true);
+ if (mr->ib_mr.lkey)
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ kfree(mr);
+ fence->mr = NULL;
+ }
+ if (fence->dma_addr) {
+ dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
+ DMA_BIDIRECTIONAL);
+ fence->dma_addr = 0;
+ }
+}
+
+static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
+{
+ int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct device *dev = &rdev->en_dev->pdev->dev;
+ struct bnxt_re_mr *mr = NULL;
+ dma_addr_t dma_addr = 0;
+ struct ib_mw *mw;
+ u64 pbl_tbl;
+ int rc;
+
+ dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
+ DMA_BIDIRECTIONAL);
+ rc = dma_mapping_error(dev, dma_addr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
+ rc = -EIO;
+ fence->dma_addr = 0;
+ goto fail;
+ }
+ fence->dma_addr = dma_addr;
+
+ /* Allocate a MR */
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ fence->mr = mr;
+ mr->rdev = rdev;
+ mr->qplib_mr.pd = &pd->qplib_pd;
+ mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+ mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
+ goto fail;
+ }
+
+ /* Register MR */
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ mr->qplib_mr.va = (u64)(unsigned long)fence->va;
+ mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
+ pbl_tbl = dma_addr;
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
+ BNXT_RE_FENCE_PBL_SIZE, false);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
+ goto fail;
+ }
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
+ /* Create a fence MW only for kernel consumers */
+ mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
+ if (!mw) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to create fence-MW for PD: %p\n", pd);
+ rc = -EINVAL;
+ goto fail;
+ }
+ fence->mw = mw;
+
+ bnxt_re_create_fence_wqe(pd);
+ return 0;
+
+fail:
+ bnxt_re_destroy_fence_mr(pd);
+ return rc;
+}
+
/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
@@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
struct bnxt_re_dev *rdev = pd->rdev;
int rc;
+ bnxt_re_destroy_fence_mr(pd);
if (ib_pd->uobject && pd->dpi.dbr) {
struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
struct bnxt_re_ucontext *ucntx;
@@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
}
}
+ if (!udata)
+ if (bnxt_re_create_fence_mr(pd))
+ dev_warn(rdev_to_dev(rdev),
+ "Failed to create Fence-MR\n");
return &pd->ib_pd;
dbfail:
(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
/* Shadow QP SQ depth should be same as QP1 RQ depth */
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
+ /* Q full delta can be 1 since it is internal QP */
+ qp->qplib_qp.sq.q_full_delta = 1;
qp->qplib_qp.scq = qp1_qp->scq;
qp->qplib_qp.rcq = qp1_qp->rcq;
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
+ /* Q full delta can be 1 since it is internal QP */
+ qp->qplib_qp.rq.q_full_delta = 1;
qp->qplib_qp.mtu = qp1_qp->mtu;
@@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
IB_SIGNAL_ALL_WR) ? true : false);
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
-
qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
@@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+ qp_init_attr->cap.max_recv_wr;
+
qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
if (qp_init_attr->qp_type == IB_QPT_GSI) {
+ /* Allocate 1 more than what's provided */
+ entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
+ qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+ qp_init_attr->cap.max_send_wr;
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
}
} else {
+ /* Allocate 128 + 1 more than what's provided */
+ entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+
+ /*
+ * Reserving one slot for Phantom WQE. Application can
+ * post one extra entry in this case. But allowing this to avoid
+ * unexpected Queue full condition
+ */
+
+ qp->qplib_qp.sq.q_full_delta -= 1;
+
qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
if (udata) {
@@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->ib_qp.qp_num = qp->qplib_qp.id;
spin_lock_init(&qp->sq_lock);
+ spin_lock_init(&qp->rq_lock);
if (udata) {
struct bnxt_re_qp_resp resp;
@@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
}
}
-static int __from_ib_access_flags(int iflags)
-{
- int qflags = 0;
-
- if (iflags & IB_ACCESS_LOCAL_WRITE)
- qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
- if (iflags & IB_ACCESS_REMOTE_READ)
- qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
- if (iflags & IB_ACCESS_REMOTE_WRITE)
- qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
- if (iflags & IB_ACCESS_REMOTE_ATOMIC)
- qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
- if (iflags & IB_ACCESS_MW_BIND)
- qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
- if (iflags & IB_ZERO_BASED)
- qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
- if (iflags & IB_ACCESS_ON_DEMAND)
- qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
- return qflags;
-};
-
-static enum ib_access_flags __to_ib_access_flags(int qflags)
-{
- enum ib_access_flags iflags = 0;
-
- if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
- iflags |= IB_ACCESS_LOCAL_WRITE;
- if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
- iflags |= IB_ACCESS_REMOTE_WRITE;
- if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
- iflags |= IB_ACCESS_REMOTE_READ;
- if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
- iflags |= IB_ACCESS_REMOTE_ATOMIC;
- if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
- iflags |= IB_ACCESS_MW_BIND;
- if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
- iflags |= IB_ZERO_BASED;
- if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
- iflags |= IB_ACCESS_ON_DEMAND;
- return iflags;
-};
-
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
@@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+ qp_attr->cap.max_send_wr;
+ /*
+ * Reserving one slot for Phantom WQE. Some application can
+ * post one extra entry in this case. Allowing this to avoid
+ * unexpected Queue full condition
+ */
+ qp->qplib_qp.sq.q_full_delta -= 1;
qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
if (qp->qplib_qp.rq.max_wqe) {
entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
qp->qplib_qp.rq.max_wqe =
min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+ qp_attr->cap.max_recv_wr;
qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
} else {
/* SRQ was used prior, just ignore the RQ caps */
@@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
return payload_sz;
}
+static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
+{
+ if ((qp->ib_qp.qp_type == IB_QPT_UD ||
+ qp->ib_qp.qp_type == IB_QPT_GSI ||
+ qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
+ qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
+ int qp_attr_mask;
+ struct ib_qp_attr qp_attr;
+
+ qp_attr_mask = IB_QP_STATE;
+ qp_attr.qp_state = IB_QPS_RTS;
+ bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
+ qp->qplib_qp.wqe_cnt = 0;
+ }
+}
+
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
struct ib_send_wr *wr)
@@ -1928,6 +2137,7 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}
@@ -2024,6 +2234,7 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
@@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_swqe wqe;
int rc = 0, payload_sz = 0;
+ unsigned long flags;
+ u32 count = 0;
+ spin_lock_irqsave(&qp->rq_lock, flags);
while (wr) {
/* House keeping */
memset(&wqe, 0, sizeof(wqe));
@@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
*bad_wr = wr;
break;
}
+
+ /* Ring DB if the RQEs posted reaches a threshold value */
+ if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+ count = 0;
+ }
+
wr = wr->next;
}
- bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+ if (count)
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+ spin_unlock_irqrestore(&qp->rq_lock, flags);
+
return rc;
}
@@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
+static int send_phantom_wqe(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&qp->sq_lock, flags);
+
+ rc = bnxt_re_bind_fence_mw(lib_qp);
+ if (!rc) {
+ lib_qp->sq.phantom_wqe_cnt++;
+ dev_dbg(&lib_qp->sq.hwq.pdev->dev,
+ "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
+ lib_qp->id, lib_qp->sq.hwq.prod,
+ HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
+ lib_qp->sq.phantom_wqe_cnt);
+ }
+
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ return rc;
+}
+
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
struct bnxt_re_qp *qp;
struct bnxt_qplib_cqe *cqe;
int i, ncqe, budget;
+ struct bnxt_qplib_q *sq;
+ struct bnxt_qplib_qp *lib_qp;
u32 tbl_idx;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
unsigned long flags;
@@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
}
cqe = &cq->cql[0];
while (budget) {
- ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget);
+ lib_qp = NULL;
+ ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
+ if (lib_qp) {
+ sq = &lib_qp->sq;
+ if (sq->send_phantom) {
+ qp = container_of(lib_qp,
+ struct bnxt_re_qp, qplib_qp);
+ if (send_phantom_wqe(qp) == -ENOMEM)
+ dev_err(rdev_to_dev(cq->rdev),
+ "Phantom failed! Scheduled to send again\n");
+ else
+ sq->send_phantom = false;
+ }
+ }
+
if (!ncqe)
break;
@@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
struct bnxt_re_dev *rdev = mr->rdev;
int rc;
+ rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+ return rc;
+ }
+
if (mr->npages && mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
&mr->qplib_frpl);
@@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
mr->npages = 0;
mr->pages = NULL;
}
- rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-
if (!IS_ERR_OR_NULL(mr->ib_umem))
ib_umem_release(mr->ib_umem);
@@ -2914,97 +3182,52 @@ fail:
return ERR_PTR(rc);
}
-/* Fast Memory Regions */
-struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_re_fmr *fmr;
+ struct bnxt_re_mw *mw;
int rc;
- if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS ||
- fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) {
- dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
+ mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+ if (!mw)
return ERR_PTR(-ENOMEM);
- }
- fmr = kzalloc(sizeof(*fmr), GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
-
- fmr->rdev = rdev;
- fmr->qplib_fmr.pd = &pd->qplib_pd;
- fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+ mw->rdev = rdev;
+ mw->qplib_mw.pd = &pd->qplib_pd;
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
- if (rc)
+ mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
goto fail;
+ }
+ mw->ib_mw.rkey = mw->qplib_mw.rkey;
- fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags);
- fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey;
- fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
+ atomic_inc(&rdev->mw_count);
+ return &mw->ib_mw;
- atomic_inc(&rdev->mr_count);
- return &fmr->ib_fmr;
fail:
- kfree(fmr);
+ kfree(mw);
return ERR_PTR(rc);
}
-int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len,
- u64 iova)
+int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
- struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
- ib_fmr);
- struct bnxt_re_dev *rdev = fmr->rdev;
+ struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
+ struct bnxt_re_dev *rdev = mw->rdev;
int rc;
- fmr->qplib_fmr.va = iova;
- fmr->qplib_fmr.total_size = list_len * PAGE_SIZE;
-
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list,
- list_len, true);
- if (rc)
- dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
- fmr->ib_fmr.lkey);
- return rc;
-}
-
-int bnxt_re_unmap_fmr(struct list_head *fmr_list)
-{
- struct bnxt_re_dev *rdev;
- struct bnxt_re_fmr *fmr;
- struct ib_fmr *ib_fmr;
- int rc = 0;
-
- /* Validate each FMRs inside the fmr_list */
- list_for_each_entry(ib_fmr, fmr_list, list) {
- fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
- rdev = fmr->rdev;
-
- if (rdev) {
- rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
- &fmr->qplib_fmr, true);
- if (rc)
- break;
- }
+ rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
+ return rc;
}
- return rc;
-}
-
-int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
-{
- struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
- ib_fmr);
- struct bnxt_re_dev *rdev = fmr->rdev;
- int rc;
- rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
- if (rc)
- dev_err(rdev_to_dev(rdev), "Failed to free FMR");
-
- kfree(fmr);
- atomic_dec(&rdev->mr_count);
+ kfree(mw);
+ atomic_dec(&rdev->mw_count);
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 5c3d71765454..6c160f6a5398 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx {
u32 refcnt;
};
+#define BNXT_RE_FENCE_BYTES 64
+struct bnxt_re_fence_data {
+ u32 size;
+ u8 va[BNXT_RE_FENCE_BYTES];
+ dma_addr_t dma_addr;
+ struct bnxt_re_mr *mr;
+ struct ib_mw *mw;
+ struct bnxt_qplib_swqe bind_wqe;
+ u32 bind_rkey;
+};
+
struct bnxt_re_pd {
struct bnxt_re_dev *rdev;
struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd;
struct bnxt_qplib_dpi dpi;
+ struct bnxt_re_fence_data fence;
};
struct bnxt_re_ah {
@@ -62,6 +74,7 @@ struct bnxt_re_qp {
struct bnxt_re_dev *rdev;
struct ib_qp ib_qp;
spinlock_t sq_lock; /* protect sq */
+ spinlock_t rq_lock; /* protect rq */
struct bnxt_qplib_qp qplib_qp;
struct ib_umem *sumem;
struct ib_umem *rumem;
@@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr);
-struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len,
- u64 iova);
-int bnxt_re_unmap_fmr(struct list_head *fmr_list);
-int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata);
+int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 5d355401179b..1fce5e73216b 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->dereg_mr = bnxt_re_dereg_mr;
ibdev->alloc_mr = bnxt_re_alloc_mr;
ibdev->map_mr_sg = bnxt_re_map_mr_sg;
- ibdev->alloc_fmr = bnxt_re_alloc_fmr;
- ibdev->map_phys_fmr = bnxt_re_map_phys_fmr;
- ibdev->unmap_fmr = bnxt_re_unmap_fmr;
- ibdev->dealloc_fmr = bnxt_re_dealloc_fmr;
ibdev->reg_user_mr = bnxt_re_reg_user_mr;
ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 43d08b5e9085..f05500bcdcf1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_qp1 req;
- struct creq_create_qp1_resp *resp;
+ struct creq_create_qp1_resp resp;
struct bnxt_qplib_pbl *pbl;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
@@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req.pd_id = cpu_to_le32(qp->pd->id);
- resp = (struct creq_create_qp1_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed");
- rc = -EINVAL;
- goto fail;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out");
- rc = -ETIMEDOUT;
- goto fail;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- rc = -EINVAL;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
goto fail;
- }
- qp->id = le32_to_cpu(resp->xid);
+
+ qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
sq->flush_in_progress = false;
rq->flush_in_progress = false;
@@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
struct cmdq_create_qp req;
- struct creq_create_qp_resp *resp;
+ struct creq_create_qp_resp resp;
struct bnxt_qplib_pbl *pbl;
struct sq_psn_search **psn_search_ptr;
unsigned long int psn_search, poff = 0;
@@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
req.pd_id = cpu_to_le32(qp->pd->id);
- resp = (struct creq_create_qp_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed");
- rc = -EINVAL;
- goto fail;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out");
- rc = -ETIMEDOUT;
- goto fail;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- rc = -EINVAL;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
goto fail;
- }
- qp->id = le32_to_cpu(resp->xid);
+
+ qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
sq->flush_in_progress = false;
rq->flush_in_progress = false;
@@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_modify_qp req;
- struct creq_modify_qp_resp *resp;
+ struct creq_modify_qp_resp resp;
u16 cmd_flags = 0, pkey;
u32 temp32[4];
u32 bmask;
+ int rc;
RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
@@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
- resp = (struct creq_modify_qp_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
qp->cur_qp_state = qp->state;
return 0;
}
@@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_query_qp req;
- struct creq_query_qp_resp *resp;
+ struct creq_query_qp_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
struct creq_query_qp_resp_sb *sb;
u16 cmd_flags = 0;
u32 temp32[4];
- int i;
+ int i, rc = 0;
RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf)
+ return -ENOMEM;
+ sb = sbuf->sb;
+
req.qp_cid = cpu_to_le32(qp->id);
req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- resp = (struct creq_query_qp_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void **)&sb, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ if (rc)
+ goto bail;
/* Extract the context from the side buffer */
qp->state = sb->en_sqd_async_notify_state &
CREQ_QUERY_QP_RESP_SB_STATE_MASK;
@@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
- return 0;
+bail:
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ return rc;
}
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
@@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_qp req;
- struct creq_destroy_qp_resp *resp;
+ struct creq_destroy_qp_resp resp;
unsigned long flags;
u16 cmd_flags = 0;
+ int rc;
RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
req.qp_cid = cpu_to_le32(qp->id);
- resp = (struct creq_destroy_qp_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
/* Must walk the associated CQs to nullified the QP ptr */
spin_lock_irqsave(&qp->scq->hwq.lock, flags);
@@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
rc = -EINVAL;
goto done;
}
- if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) ==
- HWQ_CMP(sq->hwq.cons, &sq->hwq)) {
+
+ if (bnxt_qplib_queue_full(sq)) {
+ dev_err(&sq->hwq.pdev->dev,
+ "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
+ sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
+ sq->q_full_delta);
rc = -ENOMEM;
goto done;
}
@@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}
sq->hwq.prod++;
+
+ qp->wqe_cnt++;
+
done:
return rc;
}
@@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
rc = -EINVAL;
goto done;
}
- if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) ==
- HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
+ if (bnxt_qplib_queue_full(rq)) {
dev_err(&rq->hwq.pdev->dev,
"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
rc = -EINVAL;
@@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_cq req;
- struct creq_create_cq_resp *resp;
+ struct creq_create_cq_resp resp;
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
int rc;
@@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
CMDQ_CREATE_CQ_CNQ_ID_SFT);
- resp = (struct creq_create_cq_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out");
- rc = -ETIMEDOUT;
- goto fail;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- rc = -EINVAL;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
goto fail;
- }
- cq->id = le32_to_cpu(resp->xid);
+
+ cq->id = le32_to_cpu(resp.xid);
cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
init_waitqueue_head(&cq->waitq);
@@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_cq req;
- struct creq_destroy_cq_resp *resp;
+ struct creq_destroy_cq_resp resp;
u16 cmd_flags = 0;
+ int rc;
RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
req.cq_cid = cpu_to_le32(cq->id);
- resp = (struct creq_destroy_cq_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
return 0;
}
@@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
return rc;
}
+/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
+ * CQE is track from sw_cq_cons to max_element but valid only if VALID=1
+ */
+static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
+ u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
+{
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct bnxt_qplib_swq *swq;
+ u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
+ struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
+ struct cq_req *peek_req_hwcqe;
+ struct bnxt_qplib_qp *peek_qp;
+ struct bnxt_qplib_q *peek_sq;
+ int i, rc = 0;
+
+ /* Normal mode */
+ /* Check for the psn_search marking before completing */
+ swq = &sq->swq[sw_sq_cons];
+ if (swq->psn_search &&
+ le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
+ /* Unmark */
+ swq->psn_search->flags_next_psn = cpu_to_le32
+ (le32_to_cpu(swq->psn_search->flags_next_psn)
+ & ~0x80000000);
+ dev_dbg(&cq->hwq.pdev->dev,
+ "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
+ cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
+ sq->condition = true;
+ sq->send_phantom = true;
+
+ /* TODO: Only ARM if the previous SQE is ARMALL */
+ bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
+
+ rc = -EAGAIN;
+ goto out;
+ }
+ if (sq->condition) {
+ /* Peek at the completions */
+ peek_raw_cq_cons = cq->hwq.cons;
+ peek_sw_cq_cons = cq_cons;
+ i = cq->hwq.max_elements;
+ while (i--) {
+ peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
+ peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+ peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
+ [CQE_IDX(peek_sw_cq_cons)];
+ /* If the next hwcqe is VALID */
+ if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
+ cq->hwq.max_elements)) {
+ /* If the next hwcqe is a REQ */
+ if ((peek_hwcqe->cqe_type_toggle &
+ CQ_BASE_CQE_TYPE_MASK) ==
+ CQ_BASE_CQE_TYPE_REQ) {
+ peek_req_hwcqe = (struct cq_req *)
+ peek_hwcqe;
+ peek_qp = (struct bnxt_qplib_qp *)
+ ((unsigned long)
+ le64_to_cpu
+ (peek_req_hwcqe->qp_handle));
+ peek_sq = &peek_qp->sq;
+ peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
+ peek_req_hwcqe->sq_cons_idx) - 1
+ , &sq->hwq);
+ /* If the hwcqe's sq's wr_id matches */
+ if (peek_sq == sq &&
+ sq->swq[peek_sq_cons_idx].wr_id ==
+ BNXT_QPLIB_FENCE_WRID) {
+ /*
+ * Unbreak only if the phantom
+ * comes back
+ */
+ dev_dbg(&cq->hwq.pdev->dev,
+ "FP:Got Phantom CQE");
+ sq->condition = false;
+ sq->single = true;
+ rc = 0;
+ goto out;
+ }
+ }
+ /* Valid but not the phantom, so keep looping */
+ } else {
+ /* Not valid yet, just exit and wait */
+ rc = -EINVAL;
+ goto out;
+ }
+ peek_sw_cq_cons++;
+ peek_raw_cq_cons++;
+ }
+ dev_err(&cq->hwq.pdev->dev,
+ "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
+ cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
+ rc = -EINVAL;
+ }
+out:
+ return rc;
+}
+
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct cq_req *hwcqe,
- struct bnxt_qplib_cqe **pcqe, int *budget)
+ struct bnxt_qplib_cqe **pcqe, int *budget,
+ u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *sq;
struct bnxt_qplib_cqe *cqe;
- u32 sw_cons, cqe_cons;
+ u32 sw_sq_cons, cqe_sq_cons;
+ struct bnxt_qplib_swq *swq;
int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
}
sq = &qp->sq;
- cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
- if (cqe_cons > sq->hwq.max_elements) {
+ cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
+ if (cqe_sq_cons > sq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process req reported ");
dev_err(&cq->hwq.pdev->dev,
"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
- cqe_cons, sq->hwq.max_elements);
+ cqe_sq_cons, sq->hwq.max_elements);
return -EINVAL;
}
/* If we were in the middle of flushing the SQ, continue */
@@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
/* Require to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
- * to the cqe_cons
+ * to the cqe_sq_cons
*/
cqe = *pcqe;
while (*budget) {
- sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
- if (sw_cons == cqe_cons)
+ sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
+ if (sw_sq_cons == cqe_sq_cons)
+ /* Done */
break;
+
+ swq = &sq->swq[sw_sq_cons];
memset(cqe, 0, sizeof(*cqe));
cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
cqe->qp_handle = (u64)(unsigned long)qp;
cqe->src_qp = qp->id;
- cqe->wr_id = sq->swq[sw_cons].wr_id;
- cqe->type = sq->swq[sw_cons].type;
+ cqe->wr_id = swq->wr_id;
+ if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
+ goto skip;
+ cqe->type = swq->type;
/* For the last CQE, check for status. For errors, regardless
* of the request being signaled or not, it must complete with
* the hwcqe error status
*/
- if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons &&
+ if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
hwcqe->status != CQ_REQ_STATUS_OK) {
cqe->status = hwcqe->status;
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Processed Req ");
dev_err(&cq->hwq.pdev->dev,
"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
- sw_cons, cqe->wr_id, cqe->status);
+ sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
sq->flush_in_progress = true;
/* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ sq->condition = false;
+ sq->single = false;
} else {
- if (sq->swq[sw_cons].flags &
- SQ_SEND_FLAGS_SIGNAL_COMP) {
+ if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+ /* Before we complete, do WA 9060 */
+ if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
cqe->status = CQ_REQ_STATUS_OK;
cqe++;
(*budget)--;
}
}
+skip:
sq->hwq.cons++;
+ if (sq->single)
+ break;
}
+out:
*pcqe = cqe;
- if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) {
+ if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
/* Out of budget */
rc = -EAGAIN;
goto done;
}
+ /*
+ * Back to normal completion mode only after it has completed all of
+ * the WC for this CQE
+ */
+ sq->single = false;
if (!sq->flush_in_progress)
goto done;
flush:
@@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
}
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
- int num_cqes)
+ int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
struct cq_base *hw_cqe, **hw_cqe_ptr;
unsigned long flags;
@@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
case CQ_BASE_CQE_TYPE_REQ:
rc = bnxt_qplib_cq_process_req(cq,
(struct cq_req *)hw_cqe,
- &cqe, &budget);
+ &cqe, &budget,
+ sw_cons, lib_qp);
break;
case CQ_BASE_CQE_TYPE_RES_RC:
rc = bnxt_qplib_cq_process_res_rc(cq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index f0150f8da1e3..36b7b7db0e3f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -88,6 +88,7 @@ struct bnxt_qplib_swq {
struct bnxt_qplib_swqe {
/* General */
+#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */
u64 wr_id;
u8 reqs_type;
u8 type;
@@ -216,9 +217,16 @@ struct bnxt_qplib_q {
struct scatterlist *sglist;
u32 nmap;
u32 max_wqe;
+ u16 q_full_delta;
u16 max_sge;
u32 psn;
bool flush_in_progress;
+ bool condition;
+ bool single;
+ bool send_phantom;
+ u32 phantom_wqe_cnt;
+ u32 phantom_cqe_cnt;
+ u32 next_cq_cons;
};
struct bnxt_qplib_qp {
@@ -242,6 +250,7 @@ struct bnxt_qplib_qp {
u8 timeout;
u8 retry_cnt;
u8 rnr_retry;
+ u64 wqe_cnt;
u32 min_rnr_timer;
u32 max_rd_atomic;
u32 max_dest_rd_atomic;
@@ -301,6 +310,13 @@ struct bnxt_qplib_qp {
(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
!((raw_cons) & (cp_bit)))
+static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
+{
+ return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
+ &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
+ &qplib_q->hwq);
+}
+
struct bnxt_qplib_cqe {
u8 status;
u8 type;
@@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
- int num);
+ int num, struct bnxt_qplib_qp **qp);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 23fb7260662b..16e42754dbec 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -39,72 +39,55 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
+#include <linux/delay.h>
+
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
static void bnxt_qplib_service_creq(unsigned long data);
/* Hardware communication channel */
-int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
u16 cbit;
int rc;
- cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
- if (!test_bit(cbit, rcfw->cmdq_bitmap))
- dev_warn(&rcfw->pdev->dev,
- "QPLIB: CMD bit %d for cookie 0x%x is not set?",
- cbit, cookie);
-
rc = wait_event_timeout(rcfw->waitq,
!test_bit(cbit, rcfw->cmdq_bitmap),
msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
- if (!rc) {
- dev_warn(&rcfw->pdev->dev,
- "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
- RCFW_CMD_WAIT_TIME_MS, cookie);
- }
-
- return rc;
+ return rc ? 0 : -ETIMEDOUT;
};
-int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
- u32 count = -1;
+ u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
u16 cbit;
- cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (!test_bit(cbit, rcfw->cmdq_bitmap))
goto done;
do {
+ mdelay(1); /* 1m sec */
bnxt_qplib_service_creq((unsigned long)rcfw);
} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
- return count;
+ return count ? 0 : -ETIMEDOUT;
};
-void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
- struct cmdq_base *req, void **crsbe,
- u8 is_block)
+static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+ struct creq_base *resp, void *sb, u8 is_block)
{
- struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
- struct bnxt_qplib_hwq *crsb = &rcfw->crsb;
- struct bnxt_qplib_crsqe *crsqe = NULL;
- struct bnxt_qplib_crsbe **crsb_ptr;
+ struct bnxt_qplib_crsq *crsqe;
u32 sw_prod, cmdq_prod;
- u8 retry_cnt = 0xFF;
- dma_addr_t dma_addr;
unsigned long flags;
u32 size, opcode;
u16 cookie, cbit;
int pg, idx;
u8 *preq;
-retry:
opcode = req->opcode;
if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
@@ -112,63 +95,50 @@ retry:
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW not initialized, reject opcode 0x%x",
opcode);
- return NULL;
+ return -EINVAL;
}
if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
- return NULL;
+ return -EINVAL;
}
/* Cmdq are in 16-byte units, each request can consume 1 or more
* cmdqe
*/
spin_lock_irqsave(&cmdq->lock, flags);
- if (req->cmd_size > cmdq->max_elements -
- ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
- (cmdq->max_elements - 1))) {
+ if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
spin_unlock_irqrestore(&cmdq->lock, flags);
-
- if (!retry_cnt--)
- return NULL;
- goto retry;
+ return -EAGAIN;
}
- retry_cnt = 0xFF;
- cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE;
+ cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (is_block)
cookie |= RCFW_CMD_IS_BLOCKING;
+
+ set_bit(cbit, rcfw->cmdq_bitmap);
req->cookie = cpu_to_le16(cookie);
- if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW MAX outstanding cmd reached!");
- atomic_dec(&rcfw->seq_num);
+ crsqe = &rcfw->crsqe_tbl[cbit];
+ if (crsqe->resp) {
spin_unlock_irqrestore(&cmdq->lock, flags);
-
- if (!retry_cnt--)
- return NULL;
- goto retry;
+ return -EBUSY;
}
- /* Reserve a resp buffer slot if requested */
- if (req->resp_size && crsbe) {
- spin_lock(&crsb->lock);
- sw_prod = HWQ_CMP(crsb->prod, crsb);
- crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr;
- *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)]
- [get_crsb_idx(sw_prod)];
- bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
- req->resp_addr = cpu_to_le64(dma_addr);
- crsb->prod++;
- spin_unlock(&crsb->lock);
-
- req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
- BNXT_QPLIB_CMDQE_UNITS - 1) /
- BNXT_QPLIB_CMDQE_UNITS;
+ memset(resp, 0, sizeof(*resp));
+ crsqe->resp = (struct creq_qp_event *)resp;
+ crsqe->resp->cookie = req->cookie;
+ crsqe->req_size = req->cmd_size;
+ if (req->resp_size && sb) {
+ struct bnxt_qplib_rcfw_sbuf *sbuf = sb;
+
+ req->resp_addr = cpu_to_le64(sbuf->dma_addr);
+ req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+ BNXT_QPLIB_CMDQE_UNITS;
}
+
cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
preq = (u8 *)req;
size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
@@ -190,23 +160,24 @@ retry:
preq += min_t(u32, size, sizeof(*cmdqe));
size -= min_t(u32, size, sizeof(*cmdqe));
cmdq->prod++;
+ rcfw->seq_num++;
} while (size > 0);
+ rcfw->seq_num++;
+
cmdq_prod = cmdq->prod;
if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
- /* The very first doorbell write is required to set this flag
- * which prompts the FW to reset its internal pointers
+ /* The very first doorbell write
+ * is required to set this flag
+ * which prompts the FW to reset
+ * its internal pointers
*/
cmdq_prod |= FIRMWARE_FIRST_FLAG;
rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
}
- sw_prod = HWQ_CMP(crsq->prod, crsq);
- crsqe = &crsq->crsq[sw_prod];
- memset(crsqe, 0, sizeof(*crsqe));
- crsq->prod++;
- crsqe->req_size = req->cmd_size;
/* ring CMDQ DB */
+ wmb();
writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
rcfw->cmdq_bar_reg_prod_off);
writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
@@ -214,9 +185,56 @@ retry:
done:
spin_unlock_irqrestore(&cmdq->lock, flags);
/* Return the CREQ response pointer */
- return crsqe ? &crsqe->qp_event : NULL;
+ return 0;
}
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct cmdq_base *req,
+ struct creq_base *resp,
+ void *sb, u8 is_block)
+{
+ struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
+ u16 cookie;
+ u8 opcode, retry_cnt = 0xFF;
+ int rc = 0;
+
+ do {
+ opcode = req->opcode;
+ rc = __send_message(rcfw, req, resp, sb, is_block);
+ cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
+ if (!rc)
+ break;
+
+ if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
+ /* send failed */
+ dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
+ cookie, opcode);
+ return rc;
+ }
+ is_block ? mdelay(1) : usleep_range(500, 1000);
+
+ } while (retry_cnt--);
+
+ if (is_block)
+ rc = __block_for_resp(rcfw, cookie);
+ else
+ rc = __wait_for_resp(rcfw, cookie);
+ if (rc) {
+ /* timed out */
+ dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
+ cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
+ return rc;
+ }
+
+ if (evnt->status) {
+ /* failed with status */
+ dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
+ cookie, opcode, evnt->status);
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_func_event *func_event)
@@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_qp_event *qp_event)
{
- struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
- struct bnxt_qplib_crsqe *crsqe;
- u16 cbit, cookie, blocked = 0;
+ struct bnxt_qplib_crsq *crsqe;
unsigned long flags;
- u32 sw_cons;
+ u16 cbit, blocked = 0;
+ u16 cookie;
+ __le16 mcookie;
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
@@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
default:
/* Command Response */
spin_lock_irqsave(&cmdq->lock, flags);
- sw_cons = HWQ_CMP(crsq->cons, crsq);
- crsqe = &crsq->crsq[sw_cons];
- crsq->cons++;
- memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));
-
- cookie = le16_to_cpu(crsqe->qp_event.cookie);
+ cookie = le16_to_cpu(qp_event->cookie);
+ mcookie = qp_event->cookie;
blocked = cookie & RCFW_CMD_IS_BLOCKING;
cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
+ crsqe = &rcfw->crsqe_tbl[cbit];
+ if (crsqe->resp &&
+ crsqe->resp->cookie == mcookie) {
+ memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
+ crsqe->resp = NULL;
+ } else {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
+ crsqe->resp ? "mismatch" : "collision",
+ crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
+ }
if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
dev_warn(&rcfw->pdev->dev,
"QPLIB: CMD bit %d was not requested", cbit);
-
cmdq->cons += crsqe->req_size;
- spin_unlock_irqrestore(&cmdq->lock, flags);
+ crsqe->req_size = 0;
+
if (!blocked)
wake_up(&rcfw->waitq);
- break;
+ spin_unlock_irqrestore(&cmdq->lock, flags);
}
return 0;
}
@@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data)
struct creq_base *creqe, **creq_ptr;
u32 sw_cons, raw_cons;
unsigned long flags;
- u32 type;
+ u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
- /* Service the CREQ until empty */
+ /* Service the CREQ until budget is over */
spin_lock_irqsave(&creq->lock, flags);
raw_cons = creq->cons;
- while (1) {
+ while (budget > 0) {
sw_cons = HWQ_CMP(raw_cons, creq);
creq_ptr = (struct creq_base **)creq->pbl_ptr;
creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
@@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
type = creqe->type & CREQ_BASE_TYPE_MASK;
switch (type) {
case CREQ_BASE_TYPE_QP_EVENT:
- if (!bnxt_qplib_process_qp_event
- (rcfw, (struct creq_qp_event *)creqe))
- rcfw->creq_qp_event_processed++;
- else {
- dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
- dev_warn(&rcfw->pdev->dev,
- "QPLIB: type = 0x%x not handled",
- type);
- }
+ bnxt_qplib_process_qp_event
+ (rcfw, (struct creq_qp_event *)creqe);
+ rcfw->creq_qp_event_processed++;
break;
case CREQ_BASE_TYPE_FUNC_EVENT:
if (!bnxt_qplib_process_func_event
@@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
break;
}
raw_cons++;
+ budget--;
}
+
if (creq->cons != raw_cons) {
creq->cons = raw_cons;
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
@@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
- struct creq_deinitialize_fw_resp *resp;
struct cmdq_deinitialize_fw req;
+ struct creq_deinitialize_fw_resp resp;
u16 cmd_flags = 0;
+ int rc;
RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
- resp = (struct creq_deinitialize_fw_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp)
- return -EINVAL;
-
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
- return -ETIMEDOUT;
-
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
- return -EFAULT;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 0);
+ if (rc)
+ return rc;
clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
return 0;
@@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
- struct creq_initialize_fw_resp *resp;
struct cmdq_initialize_fw req;
+ struct creq_initialize_fw_resp resp;
u16 cmd_flags = 0, level;
+ int rc;
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
@@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
skip_ctx_setup:
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
- resp = (struct creq_initialize_fw_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW: INITIALIZE_FW send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW: INITIALIZE_FW timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW: INITIALIZE_FW failed");
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 0);
+ if (rc)
+ return rc;
set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
return 0;
}
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
- bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb);
- kfree(rcfw->crsq.crsq);
+ kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
-
rcfw->pdev = NULL;
}
@@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
goto fail;
}
- rcfw->crsq.max_elements = rcfw->cmdq.max_elements;
- rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements,
- sizeof(*rcfw->crsq.crsq), GFP_KERNEL);
- if (!rcfw->crsq.crsq)
+ rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
+ sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
+ if (!rcfw->crsqe_tbl)
goto fail;
- rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
- if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
- &rcfw->crsb.max_elements,
- BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
- HWQ_TYPE_CTX)) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: HW channel CRSB allocation failed");
- goto fail;
- }
return 0;
fail:
@@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
int rc;
/* General */
- atomic_set(&rcfw->seq_num, 0);
+ rcfw->seq_num = 0;
rcfw->flags = FIRMWARE_FIRST_FLAG;
bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
sizeof(unsigned long));
@@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
- /* CRSQ */
- rcfw->crsq.prod = 0;
- rcfw->crsq.cons = 0;
-
/* CREQ */
rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
@@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
return 0;
}
+
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+ struct bnxt_qplib_rcfw *rcfw,
+ u32 size)
+{
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+
+ sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
+ if (!sbuf)
+ return NULL;
+
+ sbuf->size = size;
+ sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
+ &sbuf->dma_addr, GFP_ATOMIC);
+ if (!sbuf->sb)
+ goto bail;
+
+ return sbuf;
+bail:
+ kfree(sbuf);
+ return NULL;
+}
+
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_rcfw_sbuf *sbuf)
+{
+ if (sbuf->sb)
+ dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
+ sbuf->sb, sbuf->dma_addr);
+ kfree(sbuf);
+}
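
Note: the new side-buffer helpers pair a kzalloc'd descriptor with a DMA-coherent payload so commands that return data have a single allocation/teardown path. The caller pattern, as a sketch mirroring the converted bnxt_qplib_get_dev_attr() below (not a standalone program; req, resp and sb are the command-specific structures):

	struct bnxt_qplib_rcfw_sbuf *sbuf;
	int rc;

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;

	sb = sbuf->sb;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* ... parse *sb ... */
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;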
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index d3567d75bf58..09ce121770cd 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -73,6 +73,7 @@
#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
#define RCFW_CMD_IS_BLOCKING 0x8000
+#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20
/* Cmdq contains a fixed number of 16-Byte slots */
struct bnxt_qplib_cmdqe {
@@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe {
u8 data[1024];
};
-/* CRSQ SB */
-#define BNXT_QPLIB_CRSBE_MAX_CNT 4
-#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe)
-#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)
-
-#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1)
-#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)
-
-static inline u32 get_crsb_pg(u32 val)
-{
- return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
-}
-
-static inline u32 get_crsb_idx(u32 val)
-{
- return val & MAX_CRSB_IDX_PER_PG;
-}
-
-static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
- u32 prod, dma_addr_t *dma_addr)
-{
- *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
- *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
- BNXT_QPLIB_CRSBE_UNITS;
-}
-
/* CREQ */
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
@@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val)
#define CREQ_DB(db, raw_cons, cp_bit) \
writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
+#define CREQ_ENTRY_POLL_BUDGET 0x100
+
/* HWQ */
-struct bnxt_qplib_crsqe {
- struct creq_qp_event qp_event;
+
+struct bnxt_qplib_crsq {
+ struct creq_qp_event *resp;
u32 req_size;
};
-struct bnxt_qplib_crsq {
- struct bnxt_qplib_crsqe *crsq;
- u32 prod;
- u32 cons;
- u32 max_elements;
+struct bnxt_qplib_rcfw_sbuf {
+ void *sb;
+ dma_addr_t dma_addr;
+ u32 size;
};
/* RCFW Communication Channels */
@@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw {
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *);
- atomic_t seq_num;
+ u32 seq_num;
/* Bar region info */
void __iomem *cmdq_bar_reg_iomem;
@@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw {
/* Actual Cmd and Resp Queues */
struct bnxt_qplib_hwq cmdq;
- struct bnxt_qplib_crsq crsq;
- struct bnxt_qplib_hwq crsb;
+ struct bnxt_qplib_crsq *crsqe_tbl;
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
@@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
(struct bnxt_qplib_rcfw *,
struct creq_func_event *));
-int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
-int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
-void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
- struct cmdq_base *req, void **crsbe,
- u8 is_block);
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+ struct bnxt_qplib_rcfw *rcfw,
+ u32 size);
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_rcfw_sbuf *sbuf);
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct cmdq_base *req, struct creq_base *resp,
+ void *sbuf, u8 is_block);
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 6277d802ca4b..2e4855509719 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
+#define HWQ_FREE_SLOTS(hwq)	((hwq)->max_elements - \
+				 ((HWQ_CMP((hwq)->prod, hwq) - \
+				   HWQ_CMP((hwq)->cons, hwq)) & \
+				  ((hwq)->max_elements - 1)))
enum bnxt_qplib_hwq_type {
HWQ_TYPE_CTX,
HWQ_TYPE_QUEUE,
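
Note: HWQ_FREE_SLOTS leans on max_elements being a power of two: the masked producer/consumer difference yields the in-flight count even after both indices have wrapped. A runnable illustration of the arithmetic:

#include <stdio.h>

#define HWQ_CMP(idx, max)	((idx) & ((max) - 1))
#define FREE_SLOTS(prod, cons, max) \
	((max) - ((HWQ_CMP(prod, max) - HWQ_CMP(cons, max)) & ((max) - 1)))

int main(void)
{
	unsigned int max = 8;

	printf("%u\n", FREE_SLOTS(5u, 0u, max));	/* 3: five entries in flight */
	printf("%u\n", FREE_SLOTS(261u, 256u, max));	/* 3: same occupancy, wrapped */
	return 0;
}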
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 7b31eccedf11..fde18cf0e406 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr)
{
struct cmdq_query_func req;
- struct creq_query_func_resp *resp;
+ struct creq_query_func_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
struct creq_query_func_resp_sb *sb;
u16 cmd_flags = 0;
u32 temp;
u8 *tqm_alloc;
- int i;
+ int i, rc = 0;
RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- resp = (struct creq_query_func_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb,
- 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed ");
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
+ "QPLIB: SP: QUERY_FUNC alloc side buffer failed");
+ return -ENOMEM;
}
+
+ sb = sbuf->sb;
+ req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ if (rc)
+ goto bail;
+
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
attr->max_qp_rd_atom =
@@ -95,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
+ /*
+	 * 128 WQEs need to be reserved for the HW (8916), so do not
+	 * report the raw maximum.
+ */
+ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
attr->max_qp_sges = sb->max_sge;
attr->max_cq = le32_to_cpu(sb->max_cq);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
@@ -130,7 +128,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
}
- return 0;
+
+bail:
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ return rc;
}
/* SGID */
@@ -178,8 +179,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
/* Remove GID from the SGID table */
if (update) {
struct cmdq_delete_gid req;
- struct creq_delete_gid_resp *resp;
+ struct creq_delete_gid_resp resp;
u16 cmd_flags = 0;
+ int rc;
RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
if (sgid_tbl->hw_id[index] == 0xFFFF) {
@@ -188,31 +190,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return -EINVAL;
}
req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
- resp = (struct creq_delete_gid_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL,
- 0);
- if (!resp) {
- dev_err(&res->pdev->dev,
- "QPLIB: SP: DELETE_GID send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
- le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&res->pdev->dev,
- "QPLIB: SP: DELETE_GID timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&res->pdev->dev,
- "QPLIB: SP: DELETE_GID failed ");
- dev_err(&res->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
}
memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero));
@@ -234,7 +215,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_res,
sgid_tbl);
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- int i, free_idx, rc = 0;
+ int i, free_idx;
if (!sgid_tbl) {
dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
@@ -266,10 +247,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
if (update) {
struct cmdq_add_gid req;
- struct creq_add_gid_resp *resp;
+ struct creq_add_gid_resp resp;
u16 cmd_flags = 0;
u32 temp32[4];
u16 temp16[3];
+ int rc;
RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
@@ -290,31 +272,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
req.src_mac[1] = cpu_to_be16(temp16[1]);
req.src_mac[2] = cpu_to_be16(temp16[2]);
- resp = (struct creq_add_gid_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&res->pdev->dev,
- "QPLIB: SP: ADD_GID send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
- le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&res->pdev->dev,
- "QPIB: SP: ADD_GID timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed ");
- dev_err(&res->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
- sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
+ sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
}
/* Add GID to the sgid_tbl */
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
@@ -325,7 +287,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
*index = free_idx;
/* unlock */
- return rc;
+ return 0;
}
/* pkeys */
@@ -422,10 +384,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_ah req;
- struct creq_create_ah_resp *resp;
+ struct creq_create_ah_resp resp;
u16 cmd_flags = 0;
u32 temp32[4];
u16 temp16[3];
+ int rc;
RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
@@ -450,28 +413,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
req.dest_mac[1] = cpu_to_le16(temp16[1]);
req.dest_mac[2] = cpu_to_le16(temp16[2]);
- resp = (struct creq_create_ah_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 1);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
- ah->id = le32_to_cpu(resp->xid);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 1);
+ if (rc)
+ return rc;
+
+ ah->id = le32_to_cpu(resp.xid);
return 0;
}
@@ -479,35 +426,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_ah req;
- struct creq_destroy_ah_resp *resp;
+ struct creq_destroy_ah_resp resp;
u16 cmd_flags = 0;
+ int rc;
/* Clean up the AH table in the device */
RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
req.ah_cid = cpu_to_le32(ah->id);
- resp = (struct creq_destroy_ah_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 1);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 1);
+ if (rc)
+ return rc;
return 0;
}
@@ -516,8 +447,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_deallocate_key req;
- struct creq_deallocate_key_resp *resp;
+ struct creq_deallocate_key_resp resp;
u16 cmd_flags = 0;
+ int rc;
if (mrw->lkey == 0xFFFFFFFF) {
dev_info(&res->pdev->dev,
@@ -536,27 +468,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
else
req.key = cpu_to_le32(mrw->lkey);
- resp = (struct creq_deallocate_key_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed ");
- dev_err(&res->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ NULL, 0);
+ if (rc)
+ return rc;
+
/* Free the qplib's MRW memory */
if (mrw->hwq.max_elements)
bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
@@ -568,9 +484,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_allocate_mrw req;
- struct creq_allocate_mrw_resp *resp;
+ struct creq_allocate_mrw_resp resp;
u16 cmd_flags = 0;
unsigned long tmp;
+ int rc;
RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
@@ -584,33 +501,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
tmp = (unsigned long)mrw;
req.mrw_handle = cpu_to_le64(tmp);
- resp = (struct creq_allocate_mrw_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, 0);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed");
- return -EINVAL;
- }
- if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
- /* Cmd timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
+
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
- mrw->rkey = le32_to_cpu(resp->xid);
+ mrw->rkey = le32_to_cpu(resp.xid);
else
- mrw->lkey = le32_to_cpu(resp->xid);
+ mrw->lkey = le32_to_cpu(resp.xid);
return 0;
}
@@ -619,40 +520,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_deregister_mr req;
- struct creq_deregister_mr_resp *resp;
+ struct creq_deregister_mr_resp resp;
u16 cmd_flags = 0;
int rc;
RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
req.lkey = cpu_to_le32(mrw->lkey);
- resp = (struct creq_deregister_mr_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, block);
- if (!resp) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed");
- return -EINVAL;
- }
- if (block)
- rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
- le16_to_cpu(req.cookie));
- else
- rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
- le16_to_cpu(req.cookie));
- if (!rc) {
- /* Cmd timed out */
- dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out");
- return -ETIMEDOUT;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed ");
- dev_err(&rcfw->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, block);
+ if (rc)
+ return rc;
/* Free the qplib's MR memory */
if (mrw->hwq.max_elements) {
@@ -669,7 +547,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_register_mr req;
- struct creq_register_mr_resp *resp;
+ struct creq_register_mr_resp resp;
u16 cmd_flags = 0, level;
int pg_ptrs, pages, i, rc;
dma_addr_t **pbl_ptr;
@@ -730,36 +608,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.key = cpu_to_le32(mr->lkey);
req.mr_size = cpu_to_le64(mr->total_size);
- resp = (struct creq_register_mr_resp *)
- bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- NULL, block);
- if (!resp) {
- dev_err(&res->pdev->dev, "SP: REG_MR send failed");
- rc = -EINVAL;
- goto fail;
- }
- if (block)
- rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
- le16_to_cpu(req.cookie));
- else
- rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
- le16_to_cpu(req.cookie));
- if (!rc) {
- /* Cmd timed out */
- dev_err(&res->pdev->dev, "SP: REG_MR timed out");
- rc = -ETIMEDOUT;
- goto fail;
- }
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed ");
- dev_err(&res->pdev->dev,
- "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- rc = -EINVAL;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, block);
+ if (rc)
goto fail;
- }
+
return 0;
fail:
@@ -804,35 +657,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_map_tc_to_cos req;
- struct creq_map_tc_to_cos_resp *resp;
+ struct creq_map_tc_to_cos_resp resp;
u16 cmd_flags = 0;
- int tleft;
+ int rc = 0;
RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
req.cos0 = cpu_to_le16(cids[0]);
req.cos1 = cpu_to_le16(cids[1]);
- resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
- if (!resp) {
- dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed");
- return -EINVAL;
- }
-
- tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie));
- if (!tleft) {
- dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out");
- return -ETIMEDOUT;
- }
-
- if (resp->status ||
- le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
- dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed ");
- dev_err(&res->pdev->dev,
- "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
- resp->status, le16_to_cpu(req.cookie),
- le16_to_cpu(resp->cookie));
- return -EINVAL;
- }
-
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
-	return 0;
+	return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 1442a617e968..a543f959098b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -40,6 +40,8 @@
#ifndef __BNXT_QPLIB_SP_H__
#define __BNXT_QPLIB_SP_H__
+#define BNXT_QPLIB_RESERVED_QP_WRS 128
+
struct bnxt_qplib_dev_attr {
char fw_ver[32];
u16 max_sgid;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 76fb39415e18..e49b34c3b136 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1900,8 +1900,7 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- req = __skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
@@ -3803,8 +3802,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
if (!req_skb)
return;
- req = __skb_put(req_skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(req_skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
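
Note: __skb_put_zero(), introduced alongside this series in skbuff.h, folds the reserve-then-memset idiom above into one call that also returns the cleared region. A toy userspace model of the equivalence the conversion relies on (struct buf stands in for the sk_buff tail pointer):

#include <stdio.h>
#include <string.h>

struct buf {
	unsigned char data[64];
	unsigned int tail;
};

static void *buf_put(struct buf *b, unsigned int len)	/* models __skb_put() */
{
	void *p = b->data + b->tail;
	b->tail += len;
	return p;
}

static void *buf_put_zero(struct buf *b, unsigned int len)	/* models __skb_put_zero() */
{
	return memset(buf_put(b, len), 0, len);
}

int main(void)
{
	struct buf b = { .tail = 0 };
	unsigned char *req = buf_put_zero(&b, 16);

	printf("first byte %d, tail %u\n", req[0], b.tail);	/* 0, 16 */
	return 0;
}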
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 394cfe2625fe..e16fcaf6b5a3 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -44,8 +44,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
wr_len = sizeof *res_wr + sizeof *res;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- res_wr = __skb_put(skb, wr_len);
- memset(res_wr, 0, wr_len);
+ res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
FW_RI_RES_WR_NRES_V(1) |
@@ -114,8 +113,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- res_wr = __skb_put(skb, wr_len);
- memset(res_wr, 0, wr_len);
+ res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
FW_RI_RES_WR_NRES_V(1) |
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index f96a96dbcf1f..ae0b79aeea2e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
kfree(entry);
}
- list_for_each_safe(pos, nxt, &uctx->qpids) {
+ list_for_each_safe(pos, nxt, &uctx->cqids) {
entry = list_entry(pos, struct c4iw_qid_list, entry);
list_del_init(&entry->entry);
kfree(entry);
@@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
if (!rdev->free_workq) {
err = -ENOMEM;
- goto err_free_status_page;
+ goto err_free_status_page_and_wr_log;
}
rdev->status_page->db_off = 0;
return 0;
-err_free_status_page:
+err_free_status_page_and_wr_log:
+ if (c4iw_wr_log && rdev->wr_log)
+ kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
c4iw_ocqp_pool_destroy(rdev);
@@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
destroy_workqueue(rdev->free_workq);
kfree(rdev->wr_log);
+ c4iw_release_dev_ucontext(rdev, &rdev->uctx);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
+ c4iw_ocqp_pool_destroy(rdev);
c4iw_destroy_resource(&rdev->resource);
}
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index ca992e4b66e4..5332f06b99ba 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -81,8 +81,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- req = __skb_put(skb, wr_len);
- memset(req, 0, wr_len);
+ req = __skb_put_zero(skb, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
(wait ? FW_WR_COMPL_F : 0));
@@ -142,8 +141,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- req = __skb_put(skb, wr_len);
- memset(req, 0, wr_len);
+ req = __skb_put_zero(skb, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
if (i == (num_wqe-1)) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b23a0b057347..bfc77596acbe 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -293,8 +293,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- res_wr = __skb_put(skb, wr_len);
- memset(res_wr, 0, wr_len);
+ res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
FW_RI_RES_WR_NRES_V(2) |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 2ab505d1e8e3..dc2f59e33971 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -223,8 +223,8 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
return 0;
}
-static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
- struct ib_port_attr *props)
+static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
+ struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
@@ -232,12 +232,14 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
enum ib_mtu ndev_ib_mtu;
u16 qkey_viol_cntr;
u32 eth_prot_oper;
+ int err;
/* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out.
*/
- if (mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num))
- return;
+ err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
+ if (err)
+ return err;
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width);
@@ -258,7 +260,7 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
ndev = mlx5_ib_get_netdev(device, port_num);
if (!ndev)
- return;
+ return 0;
if (mlx5_lag_is_active(dev->mdev)) {
rcu_read_lock();
@@ -281,75 +283,49 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
dev_put(ndev);
props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
+ return 0;
}
-static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- void *mlx5_addr)
+static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr)
{
-#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
- char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
- source_l3_address);
- void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
- source_mac_47_32);
-
- if (!gid)
- return;
+ enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+ u8 roce_version = 0;
+ u8 roce_l3_type = 0;
+ bool vlan = false;
+ u8 mac[ETH_ALEN];
+ u16 vlan_id = 0;
- ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);
+ if (gid) {
+ gid_type = attr->gid_type;
+ ether_addr_copy(mac, attr->ndev->dev_addr);
- if (is_vlan_dev(attr->ndev)) {
- MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
- MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
+ if (is_vlan_dev(attr->ndev)) {
+ vlan = true;
+ vlan_id = vlan_dev_vlan_id(attr->ndev);
+ }
}
- switch (attr->gid_type) {
+ switch (gid_type) {
case IB_GID_TYPE_IB:
- MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
+ roce_version = MLX5_ROCE_VERSION_1;
break;
case IB_GID_TYPE_ROCE_UDP_ENCAP:
- MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
+ roce_version = MLX5_ROCE_VERSION_2;
+ if (ipv6_addr_v4mapped((void *)gid))
+ roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
+ else
+ roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
break;
default:
- WARN_ON(true);
+ mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
}
- if (attr->gid_type != IB_GID_TYPE_IB) {
- if (ipv6_addr_v4mapped((void *)gid))
- MLX5_SET_RA(mlx5_addr, roce_l3_type,
- MLX5_ROCE_L3_TYPE_IPV4);
- else
- MLX5_SET_RA(mlx5_addr, roce_l3_type,
- MLX5_ROCE_L3_TYPE_IPV6);
- }
-
- if ((attr->gid_type == IB_GID_TYPE_IB) ||
- !ipv6_addr_v4mapped((void *)gid))
- memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
- else
- memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
-}
-
-static int set_roce_addr(struct ib_device *device, u8 port_num,
- unsigned int index,
- const union ib_gid *gid,
- const struct ib_gid_attr *attr)
-{
- struct mlx5_ib_dev *dev = to_mdev(device);
- u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
- void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
- enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
-
- if (ll != IB_LINK_LAYER_ETHERNET)
- return -EINVAL;
-
- ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
-
- MLX5_SET(set_roce_address_in, in, roce_address_index, index);
- MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
- return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
+ roce_l3_type, gid->raw, mac, vlan,
+ vlan_id);
}
static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
@@ -357,13 +333,13 @@ static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
const struct ib_gid_attr *attr,
__always_unused void **context)
{
- return set_roce_addr(device, port_num, index, gid, attr);
+ return set_roce_addr(to_mdev(device), port_num, index, gid, attr);
}
static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
unsigned int index, __always_unused void **context)
{
- return set_roce_addr(device, port_num, index, NULL, NULL);
+ return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL);
}
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
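
Note: for RoCE v2 GIDs the refactored set_roce_addr() derives the L3 type from whether the GID is an IPv4-mapped IPv6 address (::ffff:a.b.c.d), which is what ipv6_addr_v4mapped() tests. A runnable sketch of that test over the 16-byte raw GID:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* v4-mapped layout: ten zero bytes, then 0xff 0xff, then the IPv4 address */
static int v4mapped(const uint8_t gid[16])
{
	static const uint8_t prefix[12] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff
	};
	return memcmp(gid, prefix, 12) == 0;
}

int main(void)
{
	uint8_t gid[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			    0xff, 0xff, 192, 168, 0, 1 };

	printf("roce_l3_type = %s\n", v4mapped(gid) ? "IPV4" : "IPV6");
	return 0;
}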
@@ -978,20 +954,31 @@ out:
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
+ unsigned int count;
+ int ret;
+
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
- return mlx5_query_mad_ifc_port(ibdev, port, props);
+ ret = mlx5_query_mad_ifc_port(ibdev, port, props);
+ break;
case MLX5_VPORT_ACCESS_METHOD_HCA:
- return mlx5_query_hca_port(ibdev, port, props);
+ ret = mlx5_query_hca_port(ibdev, port, props);
+ break;
case MLX5_VPORT_ACCESS_METHOD_NIC:
- mlx5_query_port_roce(ibdev, port, props);
- return 0;
+ ret = mlx5_query_port_roce(ibdev, port, props);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+ if (!ret && props) {
+ count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
+ props->gid_tbl_len -= count;
+ }
+ return ret;
}
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
@@ -3691,8 +3678,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_port_immutable = mlx5_port_immutable;
dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
- dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
- dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev;
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
+ dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
+ dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev;
+ }
if (mlx5_core_is_pf(mdev)) {
dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 30b256a2c54e..de4025deaa4a 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -742,7 +742,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
if (type == NES_TIMER_TYPE_SEND) {
new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
- atomic_inc(&new_send->skb->users);
+ refcount_inc(&new_send->skb->users);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
cm_node->send_entry = new_send;
add_ref_cm_node(cm_node);
@@ -924,7 +924,7 @@ static void nes_cm_timer_tick(unsigned long pass)
flags);
break;
}
- atomic_inc(&send_entry->skb->users);
+ refcount_inc(&send_entry->skb->users);
cm_packets_retrans++;
nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
"for node %p, jiffies = %lu, time to send = "
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 485c1fef238b..b5851fd67d4f 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -37,7 +37,7 @@
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>
-#include <linux/qed/qede_roce.h>
+
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
@@ -276,7 +276,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
QED_CHAIN_CNT_TYPE_U16,
n_entries,
sizeof(struct regpair *),
- &cnq->pbl);
+ &cnq->pbl, NULL);
if (rc)
goto err4;
@@ -902,7 +902,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
* initialization done before RoCE driver notifies
* event to stack.
*/
-static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
switch (event) {
case QEDE_UP:
@@ -931,12 +931,12 @@ static struct qedr_driver qedr_drv = {
static int __init qedr_init_module(void)
{
- return qede_roce_register_driver(&qedr_drv);
+ return qede_rdma_register_driver(&qedr_drv);
}
static void __exit qedr_exit_module(void)
{
- qede_roce_unregister_driver(&qedr_drv);
+ qede_rdma_unregister_driver(&qedr_drv);
}
module_init(qedr_init_module);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 80333ec2c8b6..ab7784bfdac6 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -36,8 +36,8 @@
#include <rdma/ib_addr.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
-#include <linux/qed/qed_roce_if.h>
-#include <linux/qed/qede_roce.h>
+#include <linux/qed/qed_rdma_if.h>
+#include <linux/qed/qede_rdma.h>
#include <linux/qed/roce_common.h>
#include "qedr_hsi_rdma.h"
@@ -58,7 +58,10 @@
#define QEDR_MSG_QP " QP"
#define QEDR_MSG_GSI " GSI"
-#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
+#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
+
+#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE)
+#define FW_PAGE_SHIFT (12)
struct qedr_dev;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index eb3dce72fc21..4689e802b332 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -44,7 +44,7 @@
#include <rdma/ib_cache.h>
#include <linux/qed/qed_if.h>
-#include <linux/qed/qed_roce_if.h>
+#include <linux/qed/qed_rdma_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 17685cfea6a2..548e4d1e998f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
struct qedr_pbl *pbl,
- struct qedr_pbl_info *pbl_info)
+ struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+ u32 fw_pg_cnt, fw_pg_per_umem_pg;
struct qedr_pbl *pbl_tbl;
struct scatterlist *sg;
struct regpair *pbe;
+ u64 pg_addr;
int entry;
- u32 addr;
if (!pbl_info->num_pbes)
return;
@@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
shift = umem->page_shift;
+ fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
+
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
pages = sg_dma_len(sg) >> shift;
+ pg_addr = sg_dma_address(sg);
for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
- /* store the page address in pbe */
- pbe->lo = cpu_to_le32(sg_dma_address(sg) +
- (pg_cnt << shift));
- addr = upper_32_bits(sg_dma_address(sg) +
- (pg_cnt << shift));
- pbe->hi = cpu_to_le32(addr);
- pbe_cnt++;
- total_num_pbes++;
- pbe++;
-
- if (total_num_pbes == pbl_info->num_pbes)
- return;
-
- /* If the given pbl is full storing the pbes,
- * move to next pbl.
- */
- if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct regpair *)pbl_tbl->va;
- pbe_cnt = 0;
+ for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
+ pbe->lo = cpu_to_le32(pg_addr);
+ pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
+
+ pg_addr += BIT(pg_shift);
+ pbe_cnt++;
+ total_num_pbes++;
+ pbe++;
+
+ if (total_num_pbes == pbl_info->num_pbes)
+ return;
+
+			/* If the given pbl is full of stored pbes,
+			 * move to the next pbl.
+ */
+ if (pbe_cnt ==
+ (pbl_info->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct regpair *)pbl_tbl->va;
+ pbe_cnt = 0;
+ }
+
+ fw_pg_cnt++;
}
}
}
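
Note: the rewritten loop fans every umem page out into BIT(page_shift - pg_shift) firmware-sized PBEs, and the qedr_init_user_queue() hunk below scales the PBE count the same way (fw_pages = page_count << (page_shift - FW_PAGE_SHIFT)). A runnable sketch of the fan-out for one 64K host page against 4K firmware pages:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int umem_shift = 16;	/* 64K host page */
	unsigned int fw_shift = 12;	/* 4K firmware page */
	unsigned int per_pg = 1u << (umem_shift - fw_shift);	/* 16 PBEs */
	uint64_t pg_addr = 0x100000000ULL;
	unsigned int i;

	for (i = 0; i < per_pg; i++) {
		printf("pbe[%u]: lo=0x%08x hi=0x%08x\n", i,
		       (unsigned int)pg_addr, (unsigned int)(pg_addr >> 32));
		pg_addr += 1u << fw_shift;
	}
	return 0;
}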
@@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
u64 buf_addr, size_t buf_len,
int access, int dmasync)
{
- int page_cnt;
+ u32 fw_pages;
int rc;
q->buf_addr = buf_addr;
@@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
return PTR_ERR(q->umem);
}
- page_cnt = ib_umem_page_count(q->umem);
- rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+ fw_pages = ib_umem_page_count(q->umem) <<
+ (q->umem->page_shift - FW_PAGE_SHIFT);
+
+ rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
if (rc)
goto err0;
@@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
goto err0;
}
- qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+ qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
+ FW_PAGE_SHIFT);
return 0;
@@ -925,7 +935,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
QED_CHAIN_CNT_TYPE_U32,
chain_entries,
sizeof(union rdma_cqe),
- &cq->pbl);
+ &cq->pbl, NULL);
if (rc)
goto err1;
@@ -1413,7 +1423,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
QED_CHAIN_CNT_TYPE_U32,
n_sq_elems,
QEDR_SQE_ELEMENT_SIZE,
- &qp->sq.pbl);
+ &qp->sq.pbl, NULL);
if (rc)
return rc;
@@ -1427,7 +1437,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
QED_CHAIN_CNT_TYPE_U32,
n_rq_elems,
QEDR_RQE_ELEMENT_SIZE,
- &qp->rq.pbl);
+ &qp->rq.pbl, NULL);
if (rc)
return rc;
@@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
goto err1;
qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
- &mr->info.pbl_info);
+ &mr->info.pbl_info, mr->umem->page_shift);
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
@@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
case IB_WC_REG_MR:
qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
break;
+ case IB_WC_RDMA_READ:
+ case IB_WC_SEND:
+ wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+ break;
default:
break;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 83d709e74dfb..073e66783f1d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
sge = ibwr->sg_list;
for (i = 0; i < num_sge; i++, sge++) {
- if (qp->is_user && copy_from_user(p, (__user void *)
- (uintptr_t)sge->addr, sge->length))
- return -EFAULT;
-
- else if (!qp->is_user)
- memcpy(p, (void *)(uintptr_t)sge->addr,
- sge->length);
+ memcpy(p, (void *)(uintptr_t)sge->addr,
+ sge->length);
p += sge->length;
}
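
Note: with the copy_from_user() leg removed, init_send_wqe() gathers every SGE with memcpy(); the loop is a plain scatter-to-contiguous gather. A userspace model of it:

#include <stdio.h>
#include <string.h>

struct sge {
	const void *addr;
	unsigned int length;
};

int main(void)
{
	char a[] = "hello ", b[] = "world";
	struct sge sg[2] = { { a, 6 }, { b, 6 } };	/* b's length includes the NUL */
	char wqe[16];
	char *p = wqe;
	int i;

	for (i = 0; i < 2; i++) {
		memcpy(p, sg[i].addr, sg[i].length);
		p += sg[i].length;
	}
	printf("%s\n", wqe);	/* "hello world" */
	return 0;
}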
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0060b2f9f659..efe7402f4885 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -863,7 +863,6 @@ dev_stop:
set_bit(IPOIB_STOP_REAPER, &priv->flags);
cancel_delayed_work(&priv->ah_reap_task);
set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
- napi_enable(&priv->napi);
ipoib_ib_dev_stop(dev);
return -1;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5da6f2e9f22e..d129625af0a7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1596,6 +1596,8 @@ static void ipoib_dev_uninit_default(struct net_device *dev)
ipoib_transport_dev_cleanup(dev);
+ netif_napi_del(&priv->napi);
+
ipoib_cm_dev_cleanup(dev);
kfree(priv->rx_ring);
@@ -1649,6 +1651,7 @@ out_rx_ring_cleanup:
kfree(priv->rx_ring);
out:
+ netif_napi_del(&priv->napi);
return -ENOMEM;
}
@@ -2237,6 +2240,7 @@ event_failed:
device_init_failed:
free_netdev(priv->dev);
+ kfree(priv);
alloc_mem_failed:
return ERR_PTR(result);
@@ -2277,7 +2281,7 @@ static void ipoib_add_one(struct ib_device *device)
static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
- struct ipoib_dev_priv *priv, *tmp;
+ struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
struct list_head *dev_list = client_data;
if (!dev_list)
@@ -2300,7 +2304,14 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
flush_workqueue(priv->wq);
unregister_netdev(priv->dev);
- free_netdev(priv->dev);
+ if (device->free_rdma_netdev)
+ device->free_rdma_netdev(priv->dev);
+ else
+ free_netdev(priv->dev);
+
+ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+ kfree(cpriv);
+
kfree(priv);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 28884781311b..3e44087935ae 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -64,8 +64,9 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int ipoib_changelink(struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
u16 mode, umcast;
int ret = 0;
@@ -93,7 +94,8 @@ out_err:
}
static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct net_device *pdev;
struct ipoib_dev_priv *ppriv;
@@ -133,7 +135,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
child_pkey, IPOIB_RTNL_CHILD);
if (!err && data)
- err = ipoib_changelink(dev, tb, data);
+ err = ipoib_changelink(dev, tb, data, extack);
return err;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 36dc4fcaa3cd..081b33deff1b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
-	if (!priv)
-		return -ENOMEM;
+	if (!priv) {
+		rtnl_unlock();
+		return -ENOMEM;
+	}
- if (!rtnl_trylock())
- return restart_syscall();
-
down_write(&ppriv->vlan_rwsem);
/*
@@ -167,8 +167,10 @@ out:
rtnl_unlock();
- if (result)
+ if (result) {
free_netdev(priv->dev);
+ kfree(priv);
+ }
return result;
}
@@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
if (dev) {
free_netdev(dev);
+ kfree(priv);
return 0;
}
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index e37d37273182..f600f3a7a3c6 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -248,7 +248,8 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev)
if (!btns_desc) {
dev_err(dev, "ACPI Button Descriptors not found\n");
- return ERR_PTR(-ENODEV);
+ button_info = ERR_PTR(-ENODEV);
+ goto out;
}
/* The first package describes the collection */
@@ -264,24 +265,31 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev)
}
if (collection_uid == -1) {
dev_err(dev, "Invalid Button Collection Descriptor\n");
- return ERR_PTR(-ENODEV);
+ button_info = ERR_PTR(-ENODEV);
+ goto out;
}
/* There are package.count - 1 buttons + 1 terminating empty entry */
button_info = devm_kcalloc(dev, btns_desc->package.count,
sizeof(*button_info), GFP_KERNEL);
- if (!button_info)
- return ERR_PTR(-ENOMEM);
+ if (!button_info) {
+ button_info = ERR_PTR(-ENOMEM);
+ goto out;
+ }
/* Parse the button descriptors */
for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) {
if (soc_button_parse_btn_desc(dev,
&btns_desc->package.elements[i],
collection_uid,
- &button_info[btn]))
- return ERR_PTR(-ENODEV);
+ &button_info[btn])) {
+ button_info = ERR_PTR(-ENODEV);
+ goto out;
+ }
}
+out:
+ kfree(buf.pointer);
return button_info;
}
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index dea63e2db3e6..f5206e2c767e 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -31,9 +31,6 @@
#define F54_GET_REPORT 1
#define F54_FORCE_CAL 2
-/* Fixed sizes of reports */
-#define F54_QUERY_LEN 27
-
/* F54 capabilities */
#define F54_CAP_BASELINE (1 << 2)
#define F54_CAP_IMAGE8 (1 << 3)
@@ -95,7 +92,6 @@ struct rmi_f54_reports {
struct f54_data {
struct rmi_function *fn;
- u8 qry[F54_QUERY_LEN];
u8 num_rx_electrodes;
u8 num_tx_electrodes;
u8 capabilities;
@@ -632,22 +628,23 @@ static int rmi_f54_detect(struct rmi_function *fn)
{
int error;
struct f54_data *f54;
+ u8 buf[6];
f54 = dev_get_drvdata(&fn->dev);
error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
- &f54->qry, sizeof(f54->qry));
+ buf, sizeof(buf));
if (error) {
dev_err(&fn->dev, "%s: Failed to query F54 properties\n",
__func__);
return error;
}
- f54->num_rx_electrodes = f54->qry[0];
- f54->num_tx_electrodes = f54->qry[1];
- f54->capabilities = f54->qry[2];
- f54->clock_rate = f54->qry[3] | (f54->qry[4] << 8);
- f54->family = f54->qry[5];
+ f54->num_rx_electrodes = buf[0];
+ f54->num_tx_electrodes = buf[1];
+ f54->capabilities = buf[2];
+ f54->clock_rate = buf[3] | (buf[4] << 8);
+ f54->family = buf[5];
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n",
f54->num_rx_electrodes);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 09720d950686..f932a83b4990 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -723,6 +723,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
},
},
+ {
+ /* Fujitsu UH554 laptop */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+ },
+ },
{ }
};
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index eb7fbe159963..929f8558bf1c 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -140,7 +140,7 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
}
#ifdef CONFIG_CLKSRC_MIPS_GIC
-u64 gic_read_count(void)
+u64 notrace gic_read_count(void)
{
unsigned int hi, hi2, lo;
@@ -167,7 +167,7 @@ unsigned int gic_get_count_width(void)
return bits;
}
-void gic_write_compare(u64 cnt)
+void notrace gic_write_compare(u64 cnt)
{
if (mips_cm_is64) {
gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
@@ -179,7 +179,7 @@ void gic_write_compare(u64 cnt)
}
}
-void gic_write_cpu_compare(u64 cnt, int cpu)
+void notrace gic_write_cpu_compare(u64 cnt, int cpu)
{
unsigned long flags;
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 4caecdcc6f29..bc208557f783 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -264,7 +264,7 @@ byte_stuff:
/* skip remainder of packet */
bcs->rx_skb = skb = NULL;
} else {
- *(u8 *)__skb_put(skb, 1) = c;
+ __skb_put_u8(skb, c);
fcs = crc_ccitt_byte(fcs, c);
}
}
@@ -315,7 +315,7 @@ static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
/* regular data byte: append to current skb */
inputstate |= INS_have_data;
- *(u8 *)__skb_put(skb, 1) = bitrev8(c);
+ __skb_put_u8(skb, bitrev8(c));
}
/* pass data up */
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index 74e250664ce9..97e00118ccfe 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -511,7 +511,7 @@ static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs)
bcs->rx_skb = NULL;
return;
}
- *(u8 *)__skb_put(bcs->rx_skb, 1) = c;
+ __skb_put_u8(bcs->rx_skb, c);
}
/* hdlc_flush
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index 87119b517508..eac0f51a0f60 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -173,8 +173,8 @@ hycapi_register_internal(struct capi_ctr *ctrl, __u16 appl,
}
skb_put_data(skb, &len, sizeof(__u16));
skb_put_data(skb, &appl, sizeof(__u16));
- memcpy(skb_put(skb, sizeof(__u8)), &_command, sizeof(_command));
- memcpy(skb_put(skb, sizeof(__u8)), &_subcommand, sizeof(_subcommand));
+ skb_put_data(skb, &_command, sizeof(__u8));
+ skb_put_data(skb, &_subcommand, sizeof(__u8));
skb_put_data(skb, &MessageNumber, sizeof(__u16));
skb_put_data(skb, &MessageBufferSize, sizeof(__u16));
skb_put_data(skb, &(rp->level3cnt), sizeof(__u16));
@@ -281,8 +281,8 @@ static void hycapi_release_internal(struct capi_ctr *ctrl, __u16 appl)
}
skb_put_data(skb, &len, sizeof(__u16));
skb_put_data(skb, &appl, sizeof(__u16));
- memcpy(skb_put(skb, sizeof(__u8)), &_command, sizeof(_command));
- memcpy(skb_put(skb, sizeof(__u8)), &_subcommand, sizeof(_subcommand));
+ skb_put_data(skb, &_command, sizeof(__u8));
+ skb_put_data(skb, &_subcommand, sizeof(__u8));
skb_put_data(skb, &MessageNumber, sizeof(__u16));
hycapi_send_message(ctrl, skb);
hycapi_applications[appl - 1].ctrl_mask &= ~(1 << (ctrl->cnr - 1));
diff --git a/drivers/isdn/i4l/isdn_bsdcomp.c b/drivers/isdn/i4l/isdn_bsdcomp.c
index 3035210a6119..99012c047751 100644
--- a/drivers/isdn/i4l/isdn_bsdcomp.c
+++ b/drivers/isdn/i4l/isdn_bsdcomp.c
@@ -472,7 +472,7 @@ static int bsd_compress(void *state, struct sk_buff *skb_in, struct sk_buff *skb
accm |= ((ent) << bitno); \
do { \
if (skb_out && skb_tailroom(skb_out) > 0) \
- *(u8 *)skb_put(skb_out, 1) = (u8)(accm >> 24); \
+ skb_put_u8(skb_out, (u8)(accm >> 24)); \
accm <<= 8; \
bitno += 8; \
} while (bitno <= 24); \
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index b7e3f1cde683..88e5a025cea7 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -2258,7 +2258,7 @@ static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto,
/* Now stuff remaining bytes */
if (len) {
- p = skb_put_data(skb, data, len);
+ skb_put_data(skb, data, len);
}
/* skb is now ready for xmit */
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 99e5f9751e8b..c5603d1a07d6 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -155,7 +155,7 @@ mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
copied = skb->len + MISDN_HEADER_LEN;
if (len < copied) {
if (flags & MSG_PEEK)
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
else
skb_queue_head(&sk->sk_receive_queue, skb);
return -ENOSPC;
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 1548259297c1..2cfd9389ee96 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -242,7 +242,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
spin_lock_irqsave(lock, flags);
val = bcm6328_led_read(addr);
- val |= (BIT(reg) << (((sel % 4) * 4) + 16));
+ val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16));
bcm6328_led_write(addr, val);
spin_unlock_irqrestore(lock, flags);
}
@@ -269,7 +269,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
spin_lock_irqsave(lock, flags);
val = bcm6328_led_read(addr);
- val |= (BIT(reg) << ((sel % 4) * 4));
+ val |= (BIT(reg % 4) << ((sel % 4) * 4));
bcm6328_led_write(addr, val);
spin_unlock_irqrestore(lock, flags);
}
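
Note: each serial-select register packs four LEDs into 4-bit signal-source fields; within the field picked by sel % 4 the LED's own bit is its index modulo 4, so for LEDs 4-7 the old BIT(reg) shift spilled past the field (this reading of the register layout is inferred from the fix, not from a datasheet). A runnable comparison:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int reg = 5, sel = 5;	/* LED 5 routed to signal source 5 */
	unsigned int shift = ((sel % 4) * 4) + 16;	/* field at bits 20..23 */
	unsigned int old_val = BIT(reg) << shift;
	unsigned int new_val = BIT(reg % 4) << shift;

	printf("old=0x%08x new=0x%08x\n", old_val, new_val);
	/* old = 0x02000000 (bit 25, outside the field); new = 0x00200000 (bit 21) */
	return 0;
}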
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index afa3b4099214..e95ea65380c8 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -20,7 +20,6 @@
#include <linux/sched/loadavg.h>
#include <linux/leds.h>
#include <linux/reboot.h>
-#include <linux/suspend.h>
#include "../leds.h"
static int panic_heartbeats;
@@ -163,30 +162,6 @@ static struct led_trigger heartbeat_led_trigger = {
.deactivate = heartbeat_trig_deactivate,
};
-static int heartbeat_pm_notifier(struct notifier_block *nb,
- unsigned long pm_event, void *unused)
-{
- int rc;
-
- switch (pm_event) {
- case PM_SUSPEND_PREPARE:
- case PM_HIBERNATION_PREPARE:
- case PM_RESTORE_PREPARE:
- led_trigger_unregister(&heartbeat_led_trigger);
- break;
- case PM_POST_SUSPEND:
- case PM_POST_HIBERNATION:
- case PM_POST_RESTORE:
- rc = led_trigger_register(&heartbeat_led_trigger);
- if (rc)
- pr_err("could not re-register heartbeat trigger\n");
- break;
- default:
- break;
- }
- return NOTIFY_DONE;
-}
-
static int heartbeat_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *unused)
{
@@ -201,10 +176,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct notifier_block heartbeat_pm_nb = {
- .notifier_call = heartbeat_pm_notifier,
-};
-
static struct notifier_block heartbeat_reboot_nb = {
.notifier_call = heartbeat_reboot_notifier,
};
@@ -221,14 +192,12 @@ static int __init heartbeat_trig_init(void)
atomic_notifier_chain_register(&panic_notifier_list,
&heartbeat_panic_nb);
register_reboot_notifier(&heartbeat_reboot_nb);
- register_pm_notifier(&heartbeat_pm_nb);
}
return rc;
}
static void __exit heartbeat_trig_exit(void)
{
- unregister_pm_notifier(&heartbeat_pm_nb);
unregister_reboot_notifier(&heartbeat_reboot_nb);
atomic_notifier_chain_unregister(&panic_notifier_list,
&heartbeat_panic_nb);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 7910bfe50da4..93b181088168 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1105,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic)
static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
struct bio *bio;
- spin_lock_irq(&ic->endio_wait.lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ic->endio_wait.lock, flags);
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
bio_list_add(&ic->flush_bio_list, bio);
- spin_unlock_irq(&ic->endio_wait.lock);
+ spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
+
queue_work(ic->commit_wq, &ic->commit_work);
}
@@ -3040,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "The device is too small";
goto bad;
}
+ if (ti->len > ic->provided_data_sectors) {
+ r = -EINVAL;
+ ti->error = "Not enough provided sectors for requested mapping size";
+ goto bad;
+ }
if (!buffer_sectors)
buffer_sectors = 1;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3702e502466d..8d5ca30f6551 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region,
else if (op == REQ_OP_WRITE_SAME)
special_cmd_max_sectors = q->limits.max_write_same_sectors;
if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
- op == REQ_OP_WRITE_SAME) &&
- special_cmd_max_sectors == 0) {
+ op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
+ atomic_inc(&io->count);
dec_count(io, region, -EOPNOTSUPP);
return;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 7d893228c40f..b4b75dad816a 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1927,7 +1927,7 @@ struct dm_raid_superblock {
/********************************************************************
* BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
*
- * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist
+ * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
*/
__le32 flags; /* Flags defining array states for reshaping */
@@ -2092,6 +2092,11 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->layout = cpu_to_le32(mddev->layout);
sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
+ /********************************************************************
+ * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
+ *
+ * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
+ */
sb->new_level = cpu_to_le32(mddev->new_level);
sb->new_layout = cpu_to_le32(mddev->new_layout);
sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
@@ -2438,8 +2443,14 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
- /* Retrieve device size stored in superblock to be prepared for shrink */
- rdev->sectors = le64_to_cpu(sb->sectors);
+ /*
+ * Retrieve rdev size stored in superblock to be prepared for shrink.
+ * Check extended superblock members are present otherwise the size
+ * will not be set!
+ */
+ if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
+ rdev->sectors = le64_to_cpu(sb->sectors);
+
rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
if (rdev->recovery_offset == MaxSector)
set_bit(In_sync, &rdev->flags);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index e61c45047c25..4da8858856fb 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
+ /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
+ bio_record->details.bi_bdev = NULL;
+
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
}
if (error == -EOPNOTSUPP)
- return error;
+ goto out;
if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
- return error;
+ goto out;
if (unlikely(error)) {
+ if (!bio_record->details.bi_bdev) {
+ /*
+ * There wasn't enough memory to record necessary
+ * information for a retry or there was no other
+ * mirror in-sync.
+ */
+ DMERR_LIMIT("Mirror read failed.");
+ return -EIO;
+ }
+
m = bio_record->m;
DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
bd = &bio_record->details;
dm_bio_restore(bd, bio);
+ bio_record->details.bi_bdev = NULL;
bio->bi_error = 0;
queue_bio(ms, bio, rw);
@@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
DMERR("All replicated volumes dead, failing I/O");
}
+out:
+ bio_record->details.bi_bdev = NULL;
+
return error;
}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 17ad50daed08..28808e5ec0fd 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1094,6 +1094,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
return;
}
+ /*
+ * Increment the unmapped blocks. This prevents a race between the
+ * passdown io and reallocation of freed blocks.
+ */
+ r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+ bio_io_error(m->bio);
+ cell_defer_no_holder(tc, m->cell);
+ mempool_free(m, pool->mapping_pool);
+ return;
+ }
+
discard_parent = bio_alloc(GFP_NOIO, 1);
if (!discard_parent) {
DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
@@ -1114,19 +1127,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
end_discard(&op, r);
}
}
-
- /*
- * Increment the unmapped blocks. This prevents a race between the
- * passdown io and reallocation of freed blocks.
- */
- r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
- if (r) {
- metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
- bio_io_error(m->bio);
- cell_defer_no_holder(tc, m->cell);
- mempool_free(m, pool->mapping_pool);
- return;
- }
}
static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 75488e65cd96..8d46e3ad9529 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona,
int ret;
ret = regmap_read_poll_timeout(arizona->regmap,
- ARIZONA_INTERRUPT_RAW_STATUS_5, val,
- ((val & mask) == target),
+ reg, val, ((val & mask) == target),
ARIZONA_REG_POLL_DELAY_US,
timeout_ms * 1000);
if (ret)
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 1842ed341af1..de962c2d5e00 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -210,6 +210,15 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
int i;
bool use_desc_chain_mode = true;
+ /*
+ * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
+ * reported. For some strange reason this occurs in descriptor
+ * chain mode only. So let's fall back to bounce buffer mode
+ * for command SD_IO_RW_EXTENDED.
+ */
+ if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
+ return;
+
for_each_sg(data->sg, sg, data->sg_len, i)
/* check for 8 byte alignment */
if (sg->offset & 7) {
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index 20bfb9ba83ea..cbb4f8566bbe 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -269,6 +269,10 @@ struct arcnet_local {
struct timer_list timer;
+ struct net_device *dev;
+ int reply_status;
+ struct tasklet_struct reply_tasklet;
+
/*
* Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
* which can be used for either sending or receiving. The new dynamic
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 62ee439d5882..fcfccbb3d9a2 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -51,6 +51,7 @@
#include <net/arp.h>
#include <linux/init.h>
#include <linux/jiffies.h>
+#include <linux/errqueue.h>
#include <linux/leds.h>
@@ -391,6 +392,52 @@ static void arcnet_timer(unsigned long data)
}
}
+static void arcnet_reply_tasklet(unsigned long data)
+{
+ struct arcnet_local *lp = (struct arcnet_local *)data;
+
+ struct sk_buff *ackskb, *skb;
+ struct sock_exterr_skb *serr;
+ struct sock *sk;
+ int ret;
+
+ local_irq_disable();
+ skb = lp->outgoing.skb;
+ if (!skb || !skb->sk) {
+ local_irq_enable();
+ return;
+ }
+
+ sock_hold(skb->sk);
+ sk = skb->sk;
+ ackskb = skb_clone_sk(skb);
+ sock_put(skb->sk);
+
+ if (!ackskb) {
+ local_irq_enable();
+ return;
+ }
+
+ serr = SKB_EXT_ERR(ackskb);
+ memset(serr, 0, sizeof(*serr));
+ serr->ee.ee_errno = ENOMSG;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
+ serr->ee.ee_data = skb_shinfo(skb)->tskey;
+ serr->ee.ee_info = lp->reply_status;
+
+ /* finally erasing outgoing skb */
+ dev_kfree_skb(lp->outgoing.skb);
+ lp->outgoing.skb = NULL;
+
+ ackskb->dev = lp->dev;
+
+ ret = sock_queue_err_skb(sk, ackskb);
+ if (ret)
+ kfree_skb(ackskb);
+
+ local_irq_enable();
+};
+
struct net_device *alloc_arcdev(const char *name)
{
struct net_device *dev;
@@ -401,6 +448,7 @@ struct net_device *alloc_arcdev(const char *name)
if (dev) {
struct arcnet_local *lp = netdev_priv(dev);
+ lp->dev = dev;
spin_lock_init(&lp->lock);
init_timer(&lp->timer);
lp->timer.data = (unsigned long) dev;
@@ -436,6 +484,9 @@ int arcnet_open(struct net_device *dev)
arc_cont(D_PROTO, "\n");
}
+ tasklet_init(&lp->reply_tasklet, arcnet_reply_tasklet,
+ (unsigned long)lp);
+
arc_printk(D_INIT, dev, "arcnet_open: resetting card.\n");
/* try to put the card in a defined state - if it fails the first
@@ -527,6 +578,8 @@ int arcnet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
+ tasklet_kill(&lp->reply_tasklet);
+
/* flush TX and disable RX */
lp->hw.intmask(dev, 0);
lp->hw.command(dev, NOTXcmd); /* stop transmit */
@@ -635,13 +688,13 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
txbuf = -1;
if (txbuf != -1) {
+ lp->outgoing.skb = skb;
if (proto->prepare_tx(dev, pkt, skb->len, txbuf) &&
!proto->ack_tx) {
/* done right away and we don't want to acknowledge
* the package later - forget about it now
*/
dev->stats.tx_bytes += skb->len;
- dev_kfree_skb(skb);
} else {
/* do it the 'split' way */
lp->outgoing.proto = proto;
@@ -756,6 +809,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct arcnet_local *lp;
int recbuf, status, diagstatus, didsomething, boguscount;
+ unsigned long flags;
int retval = IRQ_NONE;
arc_printk(D_DURING, dev, "\n");
@@ -765,7 +819,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
lp = netdev_priv(dev);
BUG_ON(!lp);
- spin_lock(&lp->lock);
+ spin_lock_irqsave(&lp->lock, flags);
/* RESET flag was enabled - if device is not running, we must
* clear it right away (but nothing else).
@@ -774,7 +828,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
if (lp->hw.status(dev) & RESETflag)
lp->hw.command(dev, CFLAGScmd | RESETclear);
lp->hw.intmask(dev, 0);
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
return retval;
}
@@ -842,8 +896,16 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
/* a transmit finished, and we're interested in it. */
if ((status & lp->intmask & TXFREEflag) || lp->timed_out) {
+ int ackstatus;
lp->intmask &= ~(TXFREEflag | EXCNAKflag);
+ if (status & TXACKflag)
+ ackstatus = 2;
+ else if (lp->excnak_pending)
+ ackstatus = 1;
+ else
+ ackstatus = 0;
+
arc_printk(D_DURING, dev, "TX IRQ (stat=%Xh)\n",
status);
@@ -866,18 +928,11 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
if (lp->outgoing.proto &&
lp->outgoing.proto->ack_tx) {
- int ackstatus;
-
- if (status & TXACKflag)
- ackstatus = 2;
- else if (lp->excnak_pending)
- ackstatus = 1;
- else
- ackstatus = 0;
-
lp->outgoing.proto
->ack_tx(dev, ackstatus);
}
+ lp->reply_status = ackstatus;
+ tasklet_hi_schedule(&lp->reply_tasklet);
}
if (lp->cur_tx != -1)
release_arcbuf(dev, lp->cur_tx);
@@ -998,7 +1053,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
udelay(1);
lp->hw.intmask(dev, lp->intmask);
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
return retval;
}
EXPORT_SYMBOL(arcnet_interrupt);
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index a80f4eb9262d..b780be6f41ff 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -212,7 +212,7 @@ static int ack_tx(struct net_device *dev, int acked)
ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */
ackpkt->soft.cap.mes.ack = acked;
- arc_printk(D_PROTO, dev, "Ackknowledge for cap packet %x.\n",
+ arc_printk(D_PROTO, dev, "Acknowledge for cap packet %x.\n",
*((int *)&ackpkt->soft.cap.cookie[0]));
ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 239de38fbd6a..2d956cb59d06 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -93,6 +93,27 @@ static void led_recon_set(struct led_classdev *led_cdev,
outb(!!value, priv->misc + ci->leds[card->index].red);
}
+static ssize_t backplane_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *net_dev = to_net_dev(dev);
+ struct arcnet_local *lp = netdev_priv(net_dev);
+
+ return sprintf(buf, "%s\n", lp->backplane ? "true" : "false");
+}
+static DEVICE_ATTR_RO(backplane_mode);
+
+static struct attribute *com20020_state_attrs[] = {
+ &dev_attr_backplane_mode.attr,
+ NULL,
+};
+
+static struct attribute_group com20020_state_group = {
+ .name = NULL,
+ .attrs = com20020_state_attrs,
+};
+
static void com20020pci_remove(struct pci_dev *pdev);
static int com20020pci_probe(struct pci_dev *pdev,
@@ -135,6 +156,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
for (i = 0; i < ci->devcount; i++) {
struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i];
struct com20020_dev *card;
+ int dev_id_mask = 0xf;
dev = alloc_arcdev(device);
if (!dev) {
@@ -166,8 +188,10 @@ static int com20020pci_probe(struct pci_dev *pdev,
arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND);
arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT);
+ SET_NETDEV_DEV(dev, &pdev->dev);
dev->base_addr = ioaddr;
dev->dev_addr[0] = node;
+ dev->sysfs_groups[0] = &com20020_state_group;
dev->irq = pdev->irq;
lp->card_name = "PCI COM20020";
lp->card_flags = ci->flags;
@@ -177,10 +201,15 @@ static int com20020pci_probe(struct pci_dev *pdev,
lp->timeout = timeout;
lp->hw.owner = THIS_MODULE;
+ lp->backplane = (inb(priv->misc) >> (2 + i)) & 0x1;
+
+ if (!strncmp(ci->name, "EAE PLX-PCI FB2", 15))
+ lp->backplane = 1;
+
/* Get the dev_id from the PLX rotary coder */
if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
- dev->dev_id = 0xc;
- dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4;
+ dev_id_mask = 0x3;
+ dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
@@ -361,6 +390,31 @@ static struct com20020_pci_card_info card_info_eae_ma1 = {
.flags = ARC_CAN_10MBIT,
};
+static struct com20020_pci_card_info card_info_eae_fb2 = {
+ .name = "EAE PLX-PCI FB2",
+ .devcount = 1,
+ .chan_map_tbl = {
+ {
+ .bar = 2,
+ .offset = 0x00,
+ .size = 0x08,
+ },
+ },
+ .misc_map = {
+ .bar = 2,
+ .offset = 0x10,
+ .size = 0x04,
+ },
+ .leds = {
+ {
+ .green = 0x0,
+ .red = 0x1,
+ },
+ },
+ .rotary = 0x0,
+ .flags = ARC_CAN_10MBIT,
+};
+
static const struct pci_device_id com20020pci_id_table[] = {
{
0x1571, 0xa001,
@@ -507,6 +561,12 @@ static const struct pci_device_id com20020pci_id_table[] = {
(kernel_ulong_t)&card_info_eae_ma1
},
{
+ 0x10B5, 0x9050,
+ 0x10B5, 0x3294,
+ 0, 0,
+ (kernel_ulong_t)&card_info_eae_fb2
+ },
+ {
0x14BA, 0x6000,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 13d9ad4b3f5c..78043a9c5981 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -246,8 +246,6 @@ int com20020_found(struct net_device *dev, int shared)
return -ENODEV;
}
- dev->base_addr = ioaddr;
-
arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n",
lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index b796db7dd621..c02cc817a490 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -925,7 +925,6 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
struct learning_pkt pkt;
struct sk_buff *skb;
int size = sizeof(struct learning_pkt);
- char *data;
memset(&pkt, 0, size);
ether_addr_copy(pkt.mac_dst, mac_addr);
@@ -936,7 +935,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
if (!skb)
return;
- data = skb_put_data(skb, &pkt, size);
+ skb_put_data(skb, &pkt, size);
skb_reset_mac_header(skb);
skb->network_header = skb->mac_header + ETH_HLEN;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 47a8103610bc..a1b33aa6054a 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -118,7 +118,8 @@ static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
};
-static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
+static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
@@ -131,7 +132,8 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
static int bond_slave_changelink(struct net_device *bond_dev,
struct net_device *slave_dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct bonding *bond = netdev_priv(bond_dev);
struct bond_opt_value newval;
@@ -156,8 +158,9 @@ static int bond_slave_changelink(struct net_device *bond_dev,
return 0;
}
-static int bond_changelink(struct net_device *bond_dev,
- struct nlattr *tb[], struct nlattr *data[])
+static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct bonding *bond = netdev_priv(bond_dev);
struct bond_opt_value newval;
@@ -438,11 +441,12 @@ static int bond_changelink(struct net_device *bond_dev,
}
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
int err;
- err = bond_changelink(bond_dev, tb, data);
+ err = bond_changelink(bond_dev, tb, data, extack);
if (err < 0)
return err;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 8ca683396fcc..a12d603d41c6 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -464,7 +464,7 @@ const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
/* Searches for a value in opt's values[] table which matches the flagmask */
static const struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
- u32 flagmask)
+ u32 flagmask)
{
int i;
@@ -744,14 +744,14 @@ static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
- netdev_info(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
- newval->string);
+ netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+ newval->string);
/* disable arp monitoring */
bond->params.arp_interval = 0;
/* set miimon to default value */
bond->params.miimon = BOND_DEFAULT_MIIMON;
- netdev_info(bond->dev, "Setting MII monitoring interval to %d\n",
- bond->params.miimon);
+ netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
+ bond->params.miimon);
}
/* don't cache arp_validate between modes */
@@ -794,7 +794,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
block_netpoll_tx();
/* check to see if we are clearing active */
if (!slave_dev) {
- netdev_info(bond->dev, "Clearing current active slave\n");
+ netdev_dbg(bond->dev, "Clearing current active slave\n");
RCU_INIT_POINTER(bond->curr_active_slave, NULL);
bond_select_active_slave(bond);
} else {
@@ -805,13 +805,13 @@ static int bond_option_active_slave_set(struct bonding *bond,
if (new_active == old_active) {
/* do nothing */
- netdev_info(bond->dev, "%s is already the current active slave\n",
- new_active->dev->name);
+ netdev_dbg(bond->dev, "%s is already the current active slave\n",
+ new_active->dev->name);
} else {
if (old_active && (new_active->link == BOND_LINK_UP) &&
bond_slave_is_up(new_active)) {
- netdev_info(bond->dev, "Setting %s as active slave\n",
- new_active->dev->name);
+ netdev_dbg(bond->dev, "Setting %s as active slave\n",
+ new_active->dev->name);
bond_change_active_slave(bond, new_active);
} else {
netdev_err(bond->dev, "Could not set %s as active slave; either %s is down or the link is down\n",
@@ -833,17 +833,17 @@ static int bond_option_active_slave_set(struct bonding *bond,
static int bond_option_miimon_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting MII monitoring interval to %llu\n",
- newval->value);
+ netdev_dbg(bond->dev, "Setting MII monitoring interval to %llu\n",
+ newval->value);
bond->params.miimon = newval->value;
if (bond->params.updelay)
- netdev_info(bond->dev, "Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
- bond->params.updelay * bond->params.miimon);
+ netdev_dbg(bond->dev, "Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
+ bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
- netdev_info(bond->dev, "Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
- bond->params.downdelay * bond->params.miimon);
+ netdev_dbg(bond->dev, "Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
+ bond->params.downdelay * bond->params.miimon);
if (newval->value && bond->params.arp_interval) {
- netdev_info(bond->dev, "MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n");
+ netdev_dbg(bond->dev, "MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n");
bond->params.arp_interval = 0;
if (bond->params.arp_validate)
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
@@ -885,8 +885,8 @@ static int bond_option_updelay_set(struct bonding *bond,
bond->params.miimon);
}
bond->params.updelay = value / bond->params.miimon;
- netdev_info(bond->dev, "Setting up delay to %d\n",
- bond->params.updelay * bond->params.miimon);
+ netdev_dbg(bond->dev, "Setting up delay to %d\n",
+ bond->params.updelay * bond->params.miimon);
return 0;
}
@@ -907,8 +907,8 @@ static int bond_option_downdelay_set(struct bonding *bond,
bond->params.miimon);
}
bond->params.downdelay = value / bond->params.miimon;
- netdev_info(bond->dev, "Setting down delay to %d\n",
- bond->params.downdelay * bond->params.miimon);
+ netdev_dbg(bond->dev, "Setting down delay to %d\n",
+ bond->params.downdelay * bond->params.miimon);
return 0;
}
@@ -916,8 +916,8 @@ static int bond_option_downdelay_set(struct bonding *bond,
static int bond_option_use_carrier_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting use_carrier to %llu\n",
- newval->value);
+ netdev_dbg(bond->dev, "Setting use_carrier to %llu\n",
+ newval->value);
bond->params.use_carrier = newval->value;
return 0;
@@ -930,16 +930,16 @@ static int bond_option_use_carrier_set(struct bonding *bond,
static int bond_option_arp_interval_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting ARP monitoring interval to %llu\n",
- newval->value);
+ netdev_dbg(bond->dev, "Setting ARP monitoring interval to %llu\n",
+ newval->value);
bond->params.arp_interval = newval->value;
if (newval->value) {
if (bond->params.miimon) {
- netdev_info(bond->dev, "ARP monitoring cannot be used with MII monitoring. Disabling MII monitoring\n");
+ netdev_dbg(bond->dev, "ARP monitoring cannot be used with MII monitoring. Disabling MII monitoring\n");
bond->params.miimon = 0;
}
if (!bond->params.arp_targets[0])
- netdev_info(bond->dev, "ARP monitoring has been set up, but no ARP targets have been specified\n");
+ netdev_dbg(bond->dev, "ARP monitoring has been set up, but no ARP targets have been specified\n");
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
@@ -1000,7 +1000,7 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
return -EINVAL;
}
- netdev_info(bond->dev, "Adding ARP target %pI4\n", &target);
+ netdev_dbg(bond->dev, "Adding ARP target %pI4\n", &target);
_bond_options_arp_ip_target_set(bond, ind, target, jiffies);
@@ -1036,7 +1036,7 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
if (ind == 0 && !targets[1] && bond->params.arp_interval)
netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n");
- netdev_info(bond->dev, "Removing ARP target %pI4\n", &target);
+ netdev_dbg(bond->dev, "Removing ARP target %pI4\n", &target);
bond_for_each_slave(bond, slave, iter) {
targets_rx = slave->target_last_arp_rx;
@@ -1088,8 +1088,8 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting arp_validate to %s (%llu)\n",
- newval->string, newval->value);
+ netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
+ newval->string, newval->value);
if (bond->dev->flags & IFF_UP) {
if (!newval->value)
@@ -1105,8 +1105,8 @@ static int bond_option_arp_validate_set(struct bonding *bond,
static int bond_option_arp_all_targets_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting arp_all_targets to %s (%llu)\n",
- newval->string, newval->value);
+ netdev_dbg(bond->dev, "Setting arp_all_targets to %s (%llu)\n",
+ newval->string, newval->value);
bond->params.arp_all_targets = newval->value;
return 0;
@@ -1126,7 +1126,7 @@ static int bond_option_primary_set(struct bonding *bond,
*p = '\0';
/* check to see if we are clearing primary */
if (!strlen(primary)) {
- netdev_info(bond->dev, "Setting primary slave to None\n");
+ netdev_dbg(bond->dev, "Setting primary slave to None\n");
RCU_INIT_POINTER(bond->primary_slave, NULL);
memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
@@ -1135,8 +1135,8 @@ static int bond_option_primary_set(struct bonding *bond,
bond_for_each_slave(bond, slave, iter) {
if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
- netdev_info(bond->dev, "Setting %s as primary slave\n",
- slave->dev->name);
+ netdev_dbg(bond->dev, "Setting %s as primary slave\n",
+ slave->dev->name);
rcu_assign_pointer(bond->primary_slave, slave);
strcpy(bond->params.primary, slave->dev->name);
bond_select_active_slave(bond);
@@ -1145,15 +1145,15 @@ static int bond_option_primary_set(struct bonding *bond,
}
if (rtnl_dereference(bond->primary_slave)) {
- netdev_info(bond->dev, "Setting primary slave to None\n");
+ netdev_dbg(bond->dev, "Setting primary slave to None\n");
RCU_INIT_POINTER(bond->primary_slave, NULL);
bond_select_active_slave(bond);
}
strncpy(bond->params.primary, primary, IFNAMSIZ);
bond->params.primary[IFNAMSIZ - 1] = 0;
- netdev_info(bond->dev, "Recording %s as primary, but it has not been enslaved to %s yet\n",
- primary, bond->dev->name);
+ netdev_dbg(bond->dev, "Recording %s as primary, but it has not been enslaved to %s yet\n",
+ primary, bond->dev->name);
out:
unblock_netpoll_tx();
@@ -1164,8 +1164,8 @@ out:
static int bond_option_primary_reselect_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting primary_reselect to %s (%llu)\n",
- newval->string, newval->value);
+ netdev_dbg(bond->dev, "Setting primary_reselect to %s (%llu)\n",
+ newval->string, newval->value);
bond->params.primary_reselect = newval->value;
block_netpoll_tx();
@@ -1178,8 +1178,8 @@ static int bond_option_primary_reselect_set(struct bonding *bond,
static int bond_option_fail_over_mac_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting fail_over_mac to %s (%llu)\n",
- newval->string, newval->value);
+ netdev_dbg(bond->dev, "Setting fail_over_mac to %s (%llu)\n",
+ newval->string, newval->value);
bond->params.fail_over_mac = newval->value;
return 0;
@@ -1188,8 +1188,8 @@ static int bond_option_fail_over_mac_set(struct bonding *bond,
static int bond_option_xmit_hash_policy_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting xmit hash policy to %s (%llu)\n",
- newval->string, newval->value);
+ netdev_dbg(bond->dev, "Setting xmit hash policy to %s (%llu)\n",
+ newval->string, newval->value);
bond->params.xmit_policy = newval->value;
return 0;
@@ -1198,8 +1198,8 @@ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
static int bond_option_resend_igmp_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting resend_igmp to %llu\n",
- newval->value);
+ netdev_dbg(bond->dev, "Setting resend_igmp to %llu\n",
+ newval->value);
bond->params.resend_igmp = newval->value;
return 0;
@@ -1237,8 +1237,8 @@ static int bond_option_all_slaves_active_set(struct bonding *bond,
static int bond_option_min_links_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting min links value to %llu\n",
- newval->value);
+ netdev_dbg(bond->dev, "Setting min links value to %llu\n",
+ newval->value);
bond->params.min_links = newval->value;
bond_set_carrier(bond);
@@ -1256,6 +1256,8 @@ static int bond_option_lp_interval_set(struct bonding *bond,
static int bond_option_pps_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
+ netdev_dbg(bond->dev, "Setting packets per slave to %llu\n",
+ newval->value);
bond->params.packets_per_slave = newval->value;
if (newval->value > 0) {
bond->params.reciprocal_packets_per_slave =
@@ -1274,8 +1276,8 @@ static int bond_option_pps_set(struct bonding *bond,
static int bond_option_lacp_rate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting LACP rate to %s (%llu)\n",
- newval->string, newval->value);
+ netdev_dbg(bond->dev, "Setting LACP rate to %s (%llu)\n",
+ newval->string, newval->value);
bond->params.lacp_fast = newval->value;
bond_3ad_update_lacp_rate(bond);
@@ -1285,8 +1287,8 @@ static int bond_option_lacp_rate_set(struct bonding *bond,
static int bond_option_ad_select_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting ad_select to %s (%llu)\n",
- newval->string, newval->value);
+ netdev_dbg(bond->dev, "Setting ad_select to %s (%llu)\n",
+ newval->string, newval->value);
bond->params.ad_select = newval->value;
return 0;
@@ -1347,7 +1349,7 @@ out:
return ret;
err_no_cmd:
- netdev_info(bond->dev, "invalid input for queue_id set\n");
+ netdev_dbg(bond->dev, "invalid input for queue_id set\n");
ret = -EPERM;
goto out;
@@ -1369,20 +1371,20 @@ static int bond_option_slaves_set(struct bonding *bond,
dev = __dev_get_by_name(dev_net(bond->dev), ifname);
if (!dev) {
- netdev_info(bond->dev, "interface %s does not exist!\n",
- ifname);
+ netdev_dbg(bond->dev, "interface %s does not exist!\n",
+ ifname);
ret = -ENODEV;
goto out;
}
switch (command[0]) {
case '+':
- netdev_info(bond->dev, "Adding slave %s\n", dev->name);
+ netdev_dbg(bond->dev, "Adding slave %s\n", dev->name);
ret = bond_enslave(bond->dev, dev);
break;
case '-':
- netdev_info(bond->dev, "Removing slave %s\n", dev->name);
+ netdev_dbg(bond->dev, "Removing slave %s\n", dev->name);
ret = bond_release(bond->dev, dev);
break;
@@ -1402,8 +1404,8 @@ err_no_cmd:
static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting dynamic-lb to %s (%llu)\n",
- newval->string, newval->value);
+ netdev_dbg(bond->dev, "Setting dynamic-lb to %s (%llu)\n",
+ newval->string, newval->value);
bond->params.tlb_dynamic_lb = newval->value;
return 0;
@@ -1412,8 +1414,8 @@ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting ad_actor_sys_prio to %llu\n",
- newval->value);
+ netdev_dbg(bond->dev, "Setting ad_actor_sys_prio to %llu\n",
+ newval->value);
bond->params.ad_actor_sys_prio = newval->value;
bond_3ad_update_ad_actor_settings(bond);
@@ -1442,7 +1444,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
if (!is_valid_ether_addr(mac))
goto err;
- netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac);
+ netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
ether_addr_copy(bond->params.ad_actor_system, mac);
bond_3ad_update_ad_actor_settings(bond);
@@ -1456,8 +1458,8 @@ err:
static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_info(bond->dev, "Setting ad_user_port_key to %llu\n",
- newval->value);
+ netdev_dbg(bond->dev, "Setting ad_user_port_key to %llu\n",
+ newval->value);
bond->params.ad_user_port_key = newval->value;
return 0;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 4534326e20ac..438966bf51c2 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -426,7 +426,6 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Check for embedded CAIF frame. */
if (desc->offset) {
struct sk_buff *skb;
- u8 *dst = NULL;
int len = 0;
pfrm = ((u8 *)desc) + desc->offset;
@@ -454,7 +453,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
}
caif_assert(skb != NULL);
- dst = skb_put_data(skb, pfrm, len);
+ skb_put_data(skb, pfrm, len);
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
@@ -555,7 +554,6 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Parse payload. */
while (nfrms < CFHSI_MAX_PKTS && *plen) {
struct sk_buff *skb;
- u8 *dst = NULL;
u8 *pcffrm = NULL;
int len;
@@ -584,7 +582,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
}
caif_assert(skb != NULL);
- dst = skb_put_data(skb, pcffrm, len);
+ skb_put_data(skb, pcffrm, len);
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
@@ -1354,7 +1352,8 @@ static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
}
static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[])
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
cfhsi_netlink_parms(data, netdev_priv(dev));
netdev_state_change(dev);
@@ -1401,7 +1400,8 @@ static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
}
static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct cfhsi *cfhsi = NULL;
struct cfhsi_ops *(*get_ops)(void);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 5c57be2082ba..709838e4c062 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -171,7 +171,6 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
struct sk_buff *skb = NULL;
struct ser_device *ser;
int ret;
- u8 *p;
ser = tty->disc_data;
@@ -198,7 +197,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
skb = netdev_alloc_skb(ser->dev, count+1);
if (skb == NULL)
return;
- p = skb_put_data(skb, data, count);
+ skb_put_data(skb, data, count);
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 24a5f5ca2037..207cb8423de0 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -526,7 +526,6 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
struct sk_buff *skb = NULL;
int spad = 0;
int epad = 0;
- u8 *dst = NULL;
int pkt_len = 0;
/*
@@ -548,7 +547,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
caif_assert(skb != NULL);
- dst = skb_put_data(skb, src, pkt_len);
+ skb_put_data(skb, src, pkt_len);
src += pkt_len;
skb->protocol = htons(ETH_P_CAIF);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index a3011c001080..365a8cc62405 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -848,7 +848,8 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
= { .len = sizeof(struct can_bittiming_const) },
};
-static int can_validate(struct nlattr *tb[], struct nlattr *data[])
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
bool is_can_fd = false;
@@ -880,8 +881,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
-static int can_changelink(struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
int err;
@@ -1146,7 +1148,8 @@ nla_put_failure:
}
static int can_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index cfe889e8f172..8404e8852a0f 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -163,7 +163,8 @@ static void vxcan_setup(struct net_device *dev)
static struct rtnl_link_ops vxcan_link_ops;
static int vxcan_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct vxcan_priv *priv;
struct net_device *peer;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 76e98e8ed315..648f91b58d1e 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -498,10 +498,8 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
struct device_node *dn)
{
struct device_node *port;
- const char *phy_mode_str;
int mode;
unsigned int port_num;
- int ret;
priv->moca_port = -1;
@@ -515,15 +513,11 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
* time
*/
mode = of_get_phy_mode(port);
- if (mode < 0) {
- ret = of_property_read_string(port, "phy-mode",
- &phy_mode_str);
- if (ret < 0)
- continue;
-
- if (!strcasecmp(phy_mode_str, "internal"))
- priv->int_phy_mask |= 1 << port_num;
- }
+ if (mode < 0)
+ continue;
+
+ if (mode == PHY_INTERFACE_MODE_INTERNAL)
+ priv->int_phy_mask |= 1 << port_num;
if (mode == PHY_INTERFACE_MODE_MOCA)
priv->moca_port = port_num;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 19772e3dd67e..53b088166c28 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -941,6 +941,26 @@ static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_atu_set_age_time(chip, 300000);
}
+static int mv88e6xxx_irl_setup(struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+
+ if (!chip->info->ops->irl_init_all)
+ return 0;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ /* Disable ingress rate limiting by resetting all per port
+ * ingress rate limit resources to their initial state.
+ */
+ err = chip->info->ops->irl_init_all(chip, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
{
u16 pvlan = 0;
@@ -2102,6 +2122,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
goto unlock;
}
+ err = mv88e6xxx_irl_setup(chip);
+ if (err)
+ goto unlock;
+
err = mv88e6xxx_phy_setup(chip);
if (err)
goto unlock;
@@ -2339,6 +2363,7 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
static const struct mv88e6xxx_ops mv88e6085_ops = {
/* MV88E6XXX_FAMILY_6097 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
@@ -2393,6 +2418,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
static const struct mv88e6xxx_ops mv88e6097_ops = {
/* MV88E6XXX_FAMILY_6097 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -2423,6 +2449,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
static const struct mv88e6xxx_ops mv88e6123_ops = {
/* MV88E6XXX_FAMILY_6165 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -2479,6 +2506,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
static const struct mv88e6xxx_ops mv88e6141_ops = {
/* MV88E6XXX_FAMILY_6341 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2512,6 +2540,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
static const struct mv88e6xxx_ops mv88e6161_ops = {
/* MV88E6XXX_FAMILY_6165 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -2542,6 +2571,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
static const struct mv88e6xxx_ops mv88e6165_ops = {
/* MV88E6XXX_FAMILY_6165 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6165_phy_read,
.phy_write = mv88e6165_phy_write,
@@ -2565,6 +2595,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
static const struct mv88e6xxx_ops mv88e6171_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -2596,6 +2627,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
static const struct mv88e6xxx_ops mv88e6172_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2630,6 +2662,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
static const struct mv88e6xxx_ops mv88e6175_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -2661,6 +2694,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
static const struct mv88e6xxx_ops mv88e6176_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2722,6 +2756,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
static const struct mv88e6xxx_ops mv88e6190_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2755,6 +2790,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
static const struct mv88e6xxx_ops mv88e6190x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2788,6 +2824,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
static const struct mv88e6xxx_ops mv88e6191_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2821,6 +2858,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
static const struct mv88e6xxx_ops mv88e6240_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2855,6 +2893,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2889,6 +2928,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
static const struct mv88e6xxx_ops mv88e6320_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2920,6 +2960,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
static const struct mv88e6xxx_ops mv88e6321_ops = {
/* MV88E6XXX_FAMILY_6321 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2950,6 +2991,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
static const struct mv88e6xxx_ops mv88e6341_ops = {
/* MV88E6XXX_FAMILY_6341 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2983,6 +3025,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
static const struct mv88e6xxx_ops mv88e6350_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3014,6 +3057,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
static const struct mv88e6xxx_ops mv88e6351_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3045,6 +3089,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
static const struct mv88e6xxx_ops mv88e6352_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3079,6 +3124,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
static const struct mv88e6xxx_ops mv88e6390_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3115,6 +3161,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
static const struct mv88e6xxx_ops mv88e6390x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 409452ebc4b9..086444016352 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -121,8 +121,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_CAP_G2_INT, /* (0x00) Interrupt Status */
MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */
MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */
- MV88E6XXX_CAP_G2_IRL_CMD, /* (0x09) Ingress Rate Command */
- MV88E6XXX_CAP_G2_IRL_DATA, /* (0x0a) Ingress Rate Data */
MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */
/* Per VLAN Spanning Tree Unit (STU).
@@ -149,15 +147,8 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_G2_INT BIT_ULL(MV88E6XXX_CAP_G2_INT)
#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X)
#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X)
-#define MV88E6XXX_FLAG_G2_IRL_CMD BIT_ULL(MV88E6XXX_CAP_G2_IRL_CMD)
-#define MV88E6XXX_FLAG_G2_IRL_DATA BIT_ULL(MV88E6XXX_CAP_G2_IRL_DATA)
#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
-/* Ingress Rate Limit unit */
-#define MV88E6XXX_FLAGS_IRL \
- (MV88E6XXX_FLAG_G2_IRL_CMD | \
- MV88E6XXX_FLAG_G2_IRL_DATA)
-
/* Multi-chip Addressing Mode */
#define MV88E6XXX_FLAGS_MULTI_CHIP \
(MV88E6XXX_FLAG_SMI_CMD | \
@@ -175,7 +166,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6165 \
@@ -185,7 +175,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6185 \
@@ -200,7 +189,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6341 \
@@ -209,7 +197,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6351 \
@@ -219,7 +206,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6352 \
@@ -230,14 +216,12 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6390 \
(MV88E6XXX_FLAG_EEE | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
- MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP)
struct mv88e6xxx_ops;
@@ -358,6 +342,9 @@ struct mv88e6xxx_mdio_bus {
};
struct mv88e6xxx_ops {
+ /* Ingress Rate Limit unit (IRL) operations */
+ int (*irl_init_all)(struct mv88e6xxx_chip *chip, int port);
+
int (*get_eeprom)(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data);
int (*set_eeprom)(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 8e8bc4ee464f..158d0f499874 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1,6 +1,5 @@
/*
- * Marvell 88E6xxx Switch Global 2 Registers support (device address
- * 0x1C)
+ * Marvell 88E6xxx Switch Global 2 Registers support
*
* Copyright (c) 2008 Marvell Semiconductor
*
@@ -13,6 +12,7 @@
* (at your option) any later version.
*/
+#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -22,22 +22,22 @@
static int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
{
- return mv88e6xxx_read(chip, ADDR_GLOBAL2, reg, val);
+ return mv88e6xxx_read(chip, MV88E6XXX_G2, reg, val);
}
static int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
{
- return mv88e6xxx_write(chip, ADDR_GLOBAL2, reg, val);
+ return mv88e6xxx_write(chip, MV88E6XXX_G2, reg, val);
}
static int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
{
- return mv88e6xxx_update(chip, ADDR_GLOBAL2, reg, update);
+ return mv88e6xxx_update(chip, MV88E6XXX_G2, reg, update);
}
static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
{
- return mv88e6xxx_wait(chip, ADDR_GLOBAL2, reg, mask);
+ return mv88e6xxx_wait(chip, MV88E6XXX_G2, reg, mask);
}
/* Offset 0x02: Management Enable 2x */
@@ -51,7 +51,7 @@ int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
* addresses matching 01:80:c2:00:00:2x as MGMT.
*/
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) {
- err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_2X, 0xffff);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_2X, 0xffff);
if (err)
return err;
}
@@ -60,7 +60,8 @@ int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
* addresses matching 01:80:c2:00:00:0x as MGMT.
*/
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X))
- return mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_0X, 0xffff);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_0X,
+ 0xffff);
return 0;
}
@@ -72,7 +73,7 @@ static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
{
u16 val = (target << 8) | (port & 0xf);
- return mv88e6xxx_g2_update(chip, GLOBAL2_DEVICE_MAPPING, val);
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val);
}
static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
@@ -101,15 +102,14 @@ static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
/* Offset 0x07: Trunk Mask Table register */
static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
- bool hask, u16 mask)
+ bool hash, u16 mask)
{
- const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
- u16 val = (num << 12) | (mask & port_mask);
+ u16 val = (num << 12) | (mask & mv88e6xxx_port_mask(chip));
- if (hask)
- val |= GLOBAL2_TRUNK_MASK_HASK;
+ if (hash)
+ val |= MV88E6XXX_G2_TRUNK_MASK_HASH;
- return mv88e6xxx_g2_update(chip, GLOBAL2_TRUNK_MASK, val);
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MASK, val);
}
/* Offset 0x08: Trunk Mapping Table register */
@@ -120,7 +120,7 @@ static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (id << 11) | (map & port_mask);
- return mv88e6xxx_g2_update(chip, GLOBAL2_TRUNK_MAPPING, val);
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val);
}
static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip)
@@ -149,27 +149,36 @@ static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip)
* Offset 0x0A: Ingress Rate Data register
*/
-static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip)
+static int mv88e6xxx_g2_irl_wait(struct mv88e6xxx_chip *chip)
{
- int port, err;
+ return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_IRL_CMD,
+ MV88E6XXX_G2_IRL_CMD_BUSY);
+}
- /* Init all Ingress Rate Limit resources of all ports */
- for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
- /* XXX newer chips (like 88E6390) have different 2-bit ops */
- err = mv88e6xxx_g2_write(chip, GLOBAL2_IRL_CMD,
- GLOBAL2_IRL_CMD_OP_INIT_ALL |
- (port << 8));
- if (err)
- break;
+static int mv88e6xxx_g2_irl_op(struct mv88e6xxx_chip *chip, u16 op, int port,
+ int res, int reg)
+{
+ int err;
- /* Wait for the operation to complete */
- err = mv88e6xxx_g2_wait(chip, GLOBAL2_IRL_CMD,
- GLOBAL2_IRL_CMD_BUSY);
- if (err)
- break;
- }
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_IRL_CMD,
+ MV88E6XXX_G2_IRL_CMD_BUSY | op | (port << 8) |
+ (res << 5) | reg);
+ if (err)
+ return err;
- return err;
+ return mv88e6xxx_g2_irl_wait(chip);
+}
+
+int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_g2_irl_op(chip, MV88E6352_G2_IRL_CMD_OP_INIT_ALL, port,
+ 0, 0);
+}
+
+int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_g2_irl_op(chip, MV88E6390_G2_IRL_CMD_OP_INIT_ALL, port,
+ 0, 0);
}
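
The two INIT_ALL helpers above replace the removed mv88e6xxx_g2_clear_irl() loop; resetting ingress rate limiting is now a per-chip op. A minimal sketch of how a caller could reproduce the old behaviour, assuming the 6352 variant and the example_ name (the actual hook-up is chip-specific and outside this hunk):

	/* Illustrative only: run the per-port "init all IRL resources" op
	 * across every port, as mv88e6xxx_g2_clear_irl() used to do.
	 */
	static int example_irl_reset(struct mv88e6xxx_chip *chip)
	{
		int port, err;

		for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
			err = mv88e6352_g2_irl_init_all(chip, port);
			if (err)
				return err;
		}
		return 0;
	}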
/* Offset 0x0B: Cross-chip Port VLAN (Addr) Register
@@ -178,7 +187,8 @@ static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_g2_pvt_op_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, GLOBAL2_PVT_ADDR, GLOBAL2_PVT_ADDR_BUSY);
+ return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_PVT_ADDR,
+ MV88E6XXX_G2_PVT_ADDR_BUSY);
}
static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev,
@@ -186,13 +196,14 @@ static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev,
{
int err;
- /* 9-bit Cross-chip PVT pointer: with GLOBAL2_MISC_5_BIT_PORT cleared,
- * source device is 5-bit, source port is 4-bit.
+ /* 9-bit Cross-chip PVT pointer: with MV88E6XXX_G2_MISC_5_BIT_PORT
+ * cleared, source device is 5-bit, source port is 4-bit.
*/
+ op |= MV88E6XXX_G2_PVT_ADDR_BUSY;
op |= (src_dev & 0x1f) << 4;
op |= (src_port & 0xf);
- err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR, op);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PVT_ADDR, op);
if (err)
return err;
@@ -208,12 +219,12 @@ int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
if (err)
return err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_DATA, data);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PVT_DATA, data);
if (err)
return err;
return mv88e6xxx_g2_pvt_op(chip, src_dev, src_port,
- GLOBAL2_PVT_ADDR_OP_WRITE_PVLAN);
+ MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN);
}
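
For concreteness, a worked encoding of the 9-bit cross-chip PVT pointer built in mv88e6xxx_g2_pvt_op() above, using illustrative source values:

	/* src_dev = 3, src_port = 5 in 5-bit/4-bit mode:
	 * ((3 & 0x1f) << 4) | (5 & 0xf) == 0x0035, which fits within
	 * MV88E6XXX_G2_PVT_ADDR_PTR_MASK (0x01ff).
	 */
	u16 ptr = ((3 & 0x1f) << 4) | (5 & 0xf);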
/* Offset 0x0D: Switch MAC/WoL/WoF register */
@@ -223,7 +234,7 @@ static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
{
u16 val = (pointer << 8) | data;
- return mv88e6xxx_g2_update(chip, GLOBAL2_SWITCH_MAC, val);
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_SWITCH_MAC, val);
}
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
@@ -246,7 +257,7 @@ static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
{
u16 val = (pointer << 8) | (data & 0x7);
- return mv88e6xxx_g2_update(chip, GLOBAL2_PRIO_OVERRIDE, val);
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_PRIO_OVERRIDE, val);
}
static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
@@ -270,16 +281,17 @@ static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, GLOBAL2_EEPROM_CMD,
- GLOBAL2_EEPROM_CMD_BUSY |
- GLOBAL2_EEPROM_CMD_RUNNING);
+ return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_EEPROM_CMD,
+ MV88E6XXX_G2_EEPROM_CMD_BUSY |
+ MV88E6XXX_G2_EEPROM_CMD_RUNNING);
}
static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
{
int err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_CMD, cmd);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_EEPROM_CMD,
+ MV88E6XXX_G2_EEPROM_CMD_BUSY | cmd);
if (err)
return err;
@@ -289,14 +301,14 @@ static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip,
u16 addr, u8 *data)
{
- u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ;
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_READ;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
if (err)
return err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr);
+ err = mv88e6xxx_g2_write(chip, MV88E6390_G2_EEPROM_ADDR, addr);
if (err)
return err;
@@ -304,7 +316,7 @@ static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &cmd);
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_EEPROM_CMD, &cmd);
if (err)
return err;
@@ -316,14 +328,15 @@ static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip,
u16 addr, u8 data)
{
- u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | GLOBAL2_EEPROM_CMD_WRITE_EN;
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_WRITE |
+ MV88E6XXX_G2_EEPROM_CMD_WRITE_EN;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
if (err)
return err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr);
+ err = mv88e6xxx_g2_write(chip, MV88E6390_G2_EEPROM_ADDR, addr);
if (err)
return err;
@@ -333,7 +346,7 @@ static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
u8 addr, u16 *data)
{
- u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ | addr;
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_READ | addr;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
@@ -344,20 +357,20 @@ static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
if (err)
return err;
- return mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_DATA, data);
+ return mv88e6xxx_g2_read(chip, MV88E6352_G2_EEPROM_DATA, data);
}
static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
u8 addr, u16 data)
{
- u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | addr;
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_WRITE | addr;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
if (err)
return err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_DATA, data);
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_EEPROM_DATA, data);
if (err)
return err;
@@ -469,11 +482,11 @@ int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
int err;
/* Ensure the RO WriteEn bit is set */
- err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &val);
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_EEPROM_CMD, &val);
if (err)
return err;
- if (!(val & GLOBAL2_EEPROM_CMD_WRITE_EN))
+ if (!(val & MV88E6XXX_G2_EEPROM_CMD_WRITE_EN))
return -EROFS;
eeprom->len = 0;
@@ -532,178 +545,213 @@ int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, GLOBAL2_SMI_PHY_CMD,
- GLOBAL2_SMI_PHY_CMD_BUSY);
+ return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_SMI_PHY_CMD,
+ MV88E6XXX_G2_SMI_PHY_CMD_BUSY);
}
static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
{
int err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_CMD, cmd);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_CMD,
+ MV88E6XXX_G2_SMI_PHY_CMD_BUSY | cmd);
if (err)
return err;
return mv88e6xxx_g2_smi_phy_wait(chip);
}
-static int mv88e6xxx_g2_smi_phy_write_addr(struct mv88e6xxx_chip *chip,
- int addr, int device, int reg,
- bool external)
+static int mv88e6xxx_g2_smi_phy_access(struct mv88e6xxx_chip *chip,
+ bool external, bool c45, u16 op, int dev,
+ int reg)
{
- int cmd = SMI_CMD_OP_45_WRITE_ADDR | (addr << 5) | device;
- int err;
+ u16 cmd = op;
if (external)
- cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+ cmd |= MV88E6390_G2_SMI_PHY_CMD_FUNC_EXTERNAL;
+ else
+		cmd |= MV88E6390_G2_SMI_PHY_CMD_FUNC_INTERNAL; /* 0x0000, kept for symmetry */
- err = mv88e6xxx_g2_smi_phy_wait(chip);
- if (err)
- return err;
+ if (c45)
+		cmd |= MV88E6XXX_G2_SMI_PHY_CMD_MODE_45; /* 0x0000, kept for symmetry */
+ else
+ cmd |= MV88E6XXX_G2_SMI_PHY_CMD_MODE_22;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, reg);
- if (err)
- return err;
+ dev <<= __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK);
+ cmd |= dev & MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK;
+ cmd |= reg & MV88E6XXX_G2_SMI_PHY_CMD_REG_ADDR_MASK;
return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
}
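
__bf_shf() comes from <linux/bitfield.h> and evaluates to the bit position of a mask's lowest set bit, so __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK) is 5. A sketch of the same field composition via FIELD_PREP(), shown only to illustrate the equivalence, not as the driver's code:

	#include <linux/bitfield.h>

	static u16 example_smi_dev_reg(int dev, int reg)
	{
		/* place dev into bits 9:5 and reg into bits 4:0 */
		return FIELD_PREP(MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK, dev) |
		       (reg & MV88E6XXX_G2_SMI_PHY_CMD_REG_ADDR_MASK);
	}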
-static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
- int addr, int reg_c45, u16 *val,
- bool external)
+static int mv88e6xxx_g2_smi_phy_access_c22(struct mv88e6xxx_chip *chip,
+ bool external, u16 op, int dev,
+ int reg)
+{
+ return mv88e6xxx_g2_smi_phy_access(chip, external, false, op, dev, reg);
+}
+
+/* IEEE 802.3 Clause 22 Read Data Register */
+static int mv88e6xxx_g2_smi_phy_read_data_c22(struct mv88e6xxx_chip *chip,
+ bool external, int dev, int reg,
+ u16 *data)
{
- int device = (reg_c45 >> 16) & 0x1f;
- int reg = reg_c45 & 0xffff;
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_22_READ_DATA;
int err;
- u16 cmd;
- err = mv88e6xxx_g2_smi_phy_write_addr(chip, addr, device, reg,
- external);
+ err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
- cmd = GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA | (addr << 5) | device;
+ err = mv88e6xxx_g2_smi_phy_access_c22(chip, external, op, dev, reg);
+ if (err)
+ return err;
- if (external)
- cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
+}
- err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+/* IEEE 802.3 Clause 22 Write Data Register */
+static int mv88e6xxx_g2_smi_phy_write_data_c22(struct mv88e6xxx_chip *chip,
+ bool external, int dev, int reg,
+ u16 data)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_22_WRITE_DATA;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
- err = mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
if (err)
return err;
- err = *val;
+ return mv88e6xxx_g2_smi_phy_access_c22(chip, external, op, dev, reg);
+}
- return 0;
+static int mv88e6xxx_g2_smi_phy_access_c45(struct mv88e6xxx_chip *chip,
+ bool external, u16 op, int port,
+ int dev)
+{
+ return mv88e6xxx_g2_smi_phy_access(chip, external, true, op, port, dev);
}
-static int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 *val,
- bool external)
+/* IEEE 802.3 Clause 45 Write Address Register */
+static int mv88e6xxx_g2_smi_phy_write_addr_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int dev,
+ int addr)
{
- u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg;
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_ADDR;
int err;
- if (external)
- cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
-
err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
- err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, addr);
if (err)
return err;
- return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
+ return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
}
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val)
+/* IEEE 802.3 Clause 45 Read Data Register */
+static int mv88e6xxx_g2_smi_phy_read_data_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int dev,
+ u16 *data)
{
- struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
- bool external = mdio_bus->external;
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_READ_DATA;
+ int err;
- if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_read_c45(chip, addr, reg, val,
- external);
- return mv88e6xxx_g2_smi_phy_read_c22(chip, addr, reg, val, external);
+ err = mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
}
-static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
- int addr, int reg_c45, u16 val,
- bool external)
+static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int reg,
+ u16 *data)
{
- int device = (reg_c45 >> 16) & 0x1f;
- int reg = reg_c45 & 0xffff;
+ int dev = (reg >> 16) & 0x1f;
+ int addr = reg & 0xffff;
int err;
- u16 cmd;
- err = mv88e6xxx_g2_smi_phy_write_addr(chip, addr, device, reg,
- external);
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
+ addr);
if (err)
return err;
- cmd = GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_DATA | (addr << 5) | device;
-
- if (external)
- cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+ return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, dev,
+ data);
+}
- err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, val);
- if (err)
- return err;
+/* IEEE 802.3 Clause 45 Write Data Register */
+static int mv88e6xxx_g2_smi_phy_write_data_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int dev,
+ u16 data)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_DATA;
+ int err;
- err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
if (err)
return err;
- return 0;
+ return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
}
-static int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 val,
- bool external)
+static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int reg,
+ u16 data)
{
- u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg;
+ int dev = (reg >> 16) & 0x1f;
+ int addr = reg & 0xffff;
int err;
- if (external)
- cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
-
- err = mv88e6xxx_g2_smi_phy_wait(chip);
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
+ addr);
if (err)
return err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, val);
- if (err)
- return err;
+ return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, dev,
+ data);
+}
- return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
+
+ if (reg & MII_ADDR_C45)
+ return mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, reg,
+ val);
+
+ return mv88e6xxx_g2_smi_phy_read_data_c22(chip, external, addr, reg,
+ val);
}
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
int addr, int reg, u16 val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_write_c45(chip, addr, reg, val,
- external);
+ return mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, reg,
+ val);
- return mv88e6xxx_g2_smi_phy_write_c22(chip, addr, reg, val, external);
+ return mv88e6xxx_g2_smi_phy_write_data_c22(chip, external, addr, reg,
+ val);
}
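
The (reg & MII_ADDR_C45) test relies on the kernel's Clause 45 register encoding, where the flag bit, the MMD device address, and the 16-bit register number share one int. A hedged sketch with illustrative values:

	#include <linux/mdio.h>

	static int example_c45_regnum(void)
	{
		int devad = MDIO_MMD_PMAPMD;	/* MMD device address 1 */
		int regnum = MDIO_CTRL1;	/* register 0 in that MMD */

		/* matches the decode above: dev = (reg >> 16) & 0x1f,
		 * addr = reg & 0xffff
		 */
		return MII_ADDR_C45 | (devad << 16) | regnum;
	}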
static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
{
u16 reg;
- mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+ mv88e6xxx_g2_read(chip, MV88E6352_G2_WDOG_CTL, &reg);
dev_info(chip->dev, "Watchdog event: 0x%04x", reg);
@@ -714,20 +762,20 @@ static void mv88e6097_watchdog_free(struct mv88e6xxx_chip *chip)
{
u16 reg;
- mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+ mv88e6xxx_g2_read(chip, MV88E6352_G2_WDOG_CTL, &reg);
- reg &= ~(GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE |
- GLOBAL2_WDOG_CONTROL_QC_ENABLE);
+ reg &= ~(MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6352_G2_WDOG_CTL_QC_ENABLE);
- mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, reg);
+ mv88e6xxx_g2_write(chip, MV88E6352_G2_WDOG_CTL, reg);
}
static int mv88e6097_watchdog_setup(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL,
- GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE |
- GLOBAL2_WDOG_CONTROL_QC_ENABLE |
- GLOBAL2_WDOG_CONTROL_SWRESET);
+ return mv88e6xxx_g2_write(chip, MV88E6352_G2_WDOG_CTL,
+ MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6352_G2_WDOG_CTL_QC_ENABLE |
+ MV88E6352_G2_WDOG_CTL_SWRESET);
}
const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {
@@ -738,12 +786,12 @@ const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {
static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_update(chip, GLOBAL2_WDOG_CONTROL,
- GLOBAL2_WDOG_INT_ENABLE |
- GLOBAL2_WDOG_CUT_THROUGH |
- GLOBAL2_WDOG_QUEUE_CONTROLLER |
- GLOBAL2_WDOG_EGRESS |
- GLOBAL2_WDOG_FORCE_IRQ);
+ return mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE |
+ MV88E6390_G2_WDOG_CTL_CUT_THROUGH |
+ MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER |
+ MV88E6390_G2_WDOG_CTL_EGRESS |
+ MV88E6390_G2_WDOG_CTL_FORCE_IRQ);
}
static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
@@ -751,17 +799,19 @@ static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
int err;
u16 reg;
- mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, GLOBAL2_WDOG_EVENT);
- err = mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_PTR_EVENT);
+ err = mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
dev_info(chip->dev, "Watchdog event: 0x%04x",
- reg & GLOBAL2_WDOG_DATA_MASK);
+ reg & MV88E6390_G2_WDOG_CTL_DATA_MASK);
- mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, GLOBAL2_WDOG_HISTORY);
- err = mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_PTR_HISTORY);
+ err = mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
dev_info(chip->dev, "Watchdog history: 0x%04x",
- reg & GLOBAL2_WDOG_DATA_MASK);
+ reg & MV88E6390_G2_WDOG_CTL_DATA_MASK);
/* Trigger a software reset to try to recover the switch */
if (chip->info->ops->reset)
@@ -774,8 +824,8 @@ static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
static void mv88e6390_watchdog_free(struct mv88e6xxx_chip *chip)
{
- mv88e6xxx_g2_update(chip, GLOBAL2_WDOG_CONTROL,
- GLOBAL2_WDOG_INT_ENABLE);
+ mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE);
}
const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
@@ -813,7 +863,7 @@ static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
int err;
chip->watchdog_irq = irq_find_mapping(chip->g2_irq.domain,
- GLOBAL2_INT_SOURCE_WATCHDOG);
+ MV88E6XXX_G2_INT_SOURCE_WATCHDOG);
if (chip->watchdog_irq < 0)
return chip->watchdog_irq;
@@ -840,16 +890,16 @@ static int mv88e6xxx_g2_misc_5_bit_port(struct mv88e6xxx_chip *chip,
u16 val;
int err;
- err = mv88e6xxx_g2_read(chip, GLOBAL2_MISC, &val);
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_MISC, &val);
if (err)
return err;
if (port_5_bit)
- val |= GLOBAL2_MISC_5_BIT_PORT;
+ val |= MV88E6XXX_G2_MISC_5_BIT_PORT;
else
- val &= ~GLOBAL2_MISC_5_BIT_PORT;
+ val &= ~MV88E6XXX_G2_MISC_5_BIT_PORT;
- return mv88e6xxx_g2_write(chip, GLOBAL2_MISC, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MISC, val);
}
int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
@@ -883,7 +933,7 @@ static irqreturn_t mv88e6xxx_g2_irq_thread_fn(int irq, void *dev_id)
u16 reg;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_g2_read(chip, GLOBAL2_INT_SOURCE, &reg);
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_INT_SOURCE, &reg);
mutex_unlock(&chip->reg_lock);
if (err)
goto out;
@@ -910,7 +960,7 @@ static void mv88e6xxx_g2_irq_bus_sync_unlock(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
- mv88e6xxx_g2_write(chip, GLOBAL2_INT_MASK, ~chip->g2_irq.masked);
+ mv88e6xxx_g2_write(chip, MV88E6XXX_G2_INT_MASK, ~chip->g2_irq.masked);
mutex_unlock(&chip->reg_lock);
}
@@ -1012,11 +1062,11 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
* highest, and send all special multicast frames to the CPU
* port at the highest priority.
*/
- reg = GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI | (0x7 << 4);
+ reg = MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI | (0x7 << 4);
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) ||
mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X))
- reg |= GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x7;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_SWITCH_MGMT, reg);
+ reg |= MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU | 0x7;
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, reg);
if (err)
return err;
@@ -1030,15 +1080,6 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_IRL)) {
- /* Disable ingress rate limiting by resetting all per port
- * ingress rate limit resources to their initial state.
- */
- err = mv88e6xxx_g2_clear_irl(chip);
- if (err)
- return err;
- }
-
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) {
/* Clear the priority override table. */
err = mv88e6xxx_g2_clear_pot(chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 479d42971706..317ffd8f323d 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -1,5 +1,5 @@
/*
- * Marvell 88E6xxx Switch Global 2 Registers support (device address 0x1C)
+ * Marvell 88E6xxx Switch Global 2 Registers support
*
* Copyright (c) 2008 Marvell Semiconductor
*
@@ -17,108 +17,198 @@
#include "chip.h"
-#define ADDR_GLOBAL2 0x1c
-
-#define GLOBAL2_INT_SOURCE 0x00
-#define GLOBAL2_INT_SOURCE_WATCHDOG 15
-#define GLOBAL2_INT_MASK 0x01
-#define GLOBAL2_MGMT_EN_2X 0x02
-#define GLOBAL2_MGMT_EN_0X 0x03
-#define GLOBAL2_FLOW_CONTROL 0x04
-#define GLOBAL2_SWITCH_MGMT 0x05
-#define GLOBAL2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA BIT(15)
-#define GLOBAL2_SWITCH_MGMT_PREVENT_LOOPS BIT(14)
-#define GLOBAL2_SWITCH_MGMT_FLOW_CONTROL_MSG BIT(13)
-#define GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI BIT(7)
-#define GLOBAL2_SWITCH_MGMT_RSVD2CPU BIT(3)
-#define GLOBAL2_DEVICE_MAPPING 0x06
-#define GLOBAL2_DEVICE_MAPPING_UPDATE BIT(15)
-#define GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT 8
-#define GLOBAL2_DEVICE_MAPPING_PORT_MASK 0x0f
-#define GLOBAL2_TRUNK_MASK 0x07
-#define GLOBAL2_TRUNK_MASK_UPDATE BIT(15)
-#define GLOBAL2_TRUNK_MASK_NUM_SHIFT 12
-#define GLOBAL2_TRUNK_MASK_HASK BIT(11)
-#define GLOBAL2_TRUNK_MAPPING 0x08
-#define GLOBAL2_TRUNK_MAPPING_UPDATE BIT(15)
-#define GLOBAL2_TRUNK_MAPPING_ID_SHIFT 11
-#define GLOBAL2_IRL_CMD 0x09
-#define GLOBAL2_IRL_CMD_BUSY BIT(15)
-#define GLOBAL2_IRL_CMD_OP_INIT_ALL ((0x001 << 12) | GLOBAL2_IRL_CMD_BUSY)
-#define GLOBAL2_IRL_CMD_OP_INIT_SEL ((0x010 << 12) | GLOBAL2_IRL_CMD_BUSY)
-#define GLOBAL2_IRL_CMD_OP_WRITE_SEL ((0x011 << 12) | GLOBAL2_IRL_CMD_BUSY)
-#define GLOBAL2_IRL_CMD_OP_READ_SEL ((0x100 << 12) | GLOBAL2_IRL_CMD_BUSY)
-#define GLOBAL2_IRL_DATA 0x0a
-#define GLOBAL2_PVT_ADDR 0x0b
-#define GLOBAL2_PVT_ADDR_BUSY BIT(15)
-#define GLOBAL2_PVT_ADDR_OP_INIT_ONES ((0x01 << 12) | GLOBAL2_PVT_ADDR_BUSY)
-#define GLOBAL2_PVT_ADDR_OP_WRITE_PVLAN ((0x03 << 12) | GLOBAL2_PVT_ADDR_BUSY)
-#define GLOBAL2_PVT_ADDR_OP_READ ((0x04 << 12) | GLOBAL2_PVT_ADDR_BUSY)
-#define GLOBAL2_PVT_DATA 0x0c
-#define GLOBAL2_SWITCH_MAC 0x0d
-#define GLOBAL2_ATU_STATS 0x0e
-#define GLOBAL2_PRIO_OVERRIDE 0x0f
-#define GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP BIT(7)
-#define GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT 4
-#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3)
-#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0
-#define GLOBAL2_EEPROM_CMD 0x14
-#define GLOBAL2_EEPROM_CMD_BUSY BIT(15)
-#define GLOBAL2_EEPROM_CMD_OP_WRITE ((0x3 << 12) | GLOBAL2_EEPROM_CMD_BUSY)
-#define GLOBAL2_EEPROM_CMD_OP_READ ((0x4 << 12) | GLOBAL2_EEPROM_CMD_BUSY)
-#define GLOBAL2_EEPROM_CMD_OP_LOAD ((0x6 << 12) | GLOBAL2_EEPROM_CMD_BUSY)
-#define GLOBAL2_EEPROM_CMD_RUNNING BIT(11)
-#define GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10)
-#define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff
-#define GLOBAL2_EEPROM_DATA 0x15
-#define GLOBAL2_EEPROM_ADDR 0x15 /* 6390, 6341 */
-#define GLOBAL2_PTP_AVB_OP 0x16
-#define GLOBAL2_PTP_AVB_DATA 0x17
-#define GLOBAL2_SMI_PHY_CMD 0x18
-#define GLOBAL2_SMI_PHY_CMD_BUSY BIT(15)
-#define GLOBAL2_SMI_PHY_CMD_EXTERNAL BIT(13)
-#define GLOBAL2_SMI_PHY_CMD_MODE_22 BIT(12)
-#define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA ((0x1 << 10) | \
- GLOBAL2_SMI_PHY_CMD_MODE_22 | \
- GLOBAL2_SMI_PHY_CMD_BUSY)
-#define GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA ((0x2 << 10) | \
- GLOBAL2_SMI_PHY_CMD_MODE_22 | \
- GLOBAL2_SMI_PHY_CMD_BUSY)
-#define GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_ADDR ((0x0 << 10) | \
- GLOBAL2_SMI_PHY_CMD_BUSY)
-#define GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_DATA ((0x1 << 10) | \
- GLOBAL2_SMI_PHY_CMD_BUSY)
-#define GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA ((0x3 << 10) | \
- GLOBAL2_SMI_PHY_CMD_BUSY)
-
-#define GLOBAL2_SMI_PHY_DATA 0x19
-#define GLOBAL2_SCRATCH_MISC 0x1a
-#define GLOBAL2_SCRATCH_BUSY BIT(15)
-#define GLOBAL2_SCRATCH_REGISTER_SHIFT 8
-#define GLOBAL2_SCRATCH_VALUE_MASK 0xff
-#define GLOBAL2_WDOG_CONTROL 0x1b
-#define GLOBAL2_WDOG_CONTROL_EGRESS_EVENT BIT(7)
-#define GLOBAL2_WDOG_CONTROL_RMU_TIMEOUT BIT(6)
-#define GLOBAL2_WDOG_CONTROL_QC_ENABLE BIT(5)
-#define GLOBAL2_WDOG_CONTROL_EGRESS_HISTORY BIT(4)
-#define GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE BIT(3)
-#define GLOBAL2_WDOG_CONTROL_FORCE_IRQ BIT(2)
-#define GLOBAL2_WDOG_CONTROL_HISTORY BIT(1)
-#define GLOBAL2_WDOG_CONTROL_SWRESET BIT(0)
-#define GLOBAL2_WDOG_UPDATE BIT(15)
-#define GLOBAL2_WDOG_INT_SOURCE (0x00 << 8)
-#define GLOBAL2_WDOG_INT_STATUS (0x10 << 8)
-#define GLOBAL2_WDOG_INT_ENABLE (0x11 << 8)
-#define GLOBAL2_WDOG_EVENT (0x12 << 8)
-#define GLOBAL2_WDOG_HISTORY (0x13 << 8)
-#define GLOBAL2_WDOG_DATA_MASK 0xff
-#define GLOBAL2_WDOG_CUT_THROUGH BIT(3)
-#define GLOBAL2_WDOG_QUEUE_CONTROLLER BIT(2)
-#define GLOBAL2_WDOG_EGRESS BIT(1)
-#define GLOBAL2_WDOG_FORCE_IRQ BIT(0)
-#define GLOBAL2_QOS_WEIGHT 0x1c
-#define GLOBAL2_MISC 0x1d
-#define GLOBAL2_MISC_5_BIT_PORT BIT(14)
+#define MV88E6XXX_G2 0x1c
+
+/* Offset 0x00: Interrupt Source Register */
+#define MV88E6XXX_G2_INT_SOURCE 0x00
+#define MV88E6XXX_G2_INT_SOURCE_WATCHDOG 15
+
+/* Offset 0x01: Interrupt Mask Register */
+#define MV88E6XXX_G2_INT_MASK 0x01
+
+/* Offset 0x02: MGMT Enable Register 2x */
+#define MV88E6XXX_G2_MGMT_EN_2X 0x02
+
+/* Offset 0x03: MGMT Enable Register 0x */
+#define MV88E6XXX_G2_MGMT_EN_0X 0x03
+
+/* Offset 0x04: Flow Control Delay Register */
+#define MV88E6XXX_G2_FLOW_CTL 0x04
+
+/* Offset 0x05: Switch Management Register */
+#define MV88E6XXX_G2_SWITCH_MGMT 0x05
+#define MV88E6XXX_G2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA 0x8000
+#define MV88E6XXX_G2_SWITCH_MGMT_PREVENT_LOOPS 0x4000
+#define MV88E6XXX_G2_SWITCH_MGMT_FLOW_CTL_MSG 0x2000
+#define MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI 0x0080
+#define MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU 0x0008
+
+/* Offset 0x06: Device Mapping Table Register */
+#define MV88E6XXX_G2_DEVICE_MAPPING 0x06
+#define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000
+#define MV88E6XXX_G2_DEVICE_MAPPING_DEV_MASK 0x1f00
+#define MV88E6XXX_G2_DEVICE_MAPPING_PORT_MASK 0x000f
+
+/* Offset 0x07: Trunk Mask Table Register */
+#define MV88E6XXX_G2_TRUNK_MASK 0x07
+#define MV88E6XXX_G2_TRUNK_MASK_UPDATE 0x8000
+#define MV88E6XXX_G2_TRUNK_MASK_NUM_MASK 0x7000
+#define MV88E6XXX_G2_TRUNK_MASK_HASH 0x0800
+
+/* Offset 0x08: Trunk Mapping Table Register */
+#define MV88E6XXX_G2_TRUNK_MAPPING 0x08
+#define MV88E6XXX_G2_TRUNK_MAPPING_UPDATE 0x8000
+#define MV88E6XXX_G2_TRUNK_MAPPING_ID_MASK 0x7800
+
+/* Offset 0x09: Ingress Rate Command Register */
+#define MV88E6XXX_G2_IRL_CMD 0x09
+#define MV88E6XXX_G2_IRL_CMD_BUSY 0x8000
+#define MV88E6352_G2_IRL_CMD_OP_MASK 0x7000
+#define MV88E6352_G2_IRL_CMD_OP_NOOP 0x0000
+#define MV88E6352_G2_IRL_CMD_OP_INIT_ALL 0x1000
+#define MV88E6352_G2_IRL_CMD_OP_INIT_RES 0x2000
+#define MV88E6352_G2_IRL_CMD_OP_WRITE_REG 0x3000
+#define MV88E6352_G2_IRL_CMD_OP_READ_REG 0x4000
+#define MV88E6390_G2_IRL_CMD_OP_MASK 0x6000
+#define MV88E6390_G2_IRL_CMD_OP_READ_REG 0x0000
+#define MV88E6390_G2_IRL_CMD_OP_INIT_ALL 0x2000
+#define MV88E6390_G2_IRL_CMD_OP_INIT_RES 0x4000
+#define MV88E6390_G2_IRL_CMD_OP_WRITE_REG 0x6000
+#define MV88E6352_G2_IRL_CMD_PORT_MASK 0x0f00
+#define MV88E6390_G2_IRL_CMD_PORT_MASK 0x1f00
+#define MV88E6XXX_G2_IRL_CMD_RES_MASK 0x00e0
+#define MV88E6XXX_G2_IRL_CMD_REG_MASK 0x000f
+
+/* Offset 0x0A: Ingress Rate Data Register */
+#define MV88E6XXX_G2_IRL_DATA 0x0a
+#define MV88E6XXX_G2_IRL_DATA_MASK 0xffff
+
+/* Offset 0x0B: Cross-chip Port VLAN Register */
+#define MV88E6XXX_G2_PVT_ADDR 0x0b
+#define MV88E6XXX_G2_PVT_ADDR_BUSY 0x8000
+#define MV88E6XXX_G2_PVT_ADDR_OP_MASK 0x7000
+#define MV88E6XXX_G2_PVT_ADDR_OP_INIT_ONES 0x1000
+#define MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN 0x3000
+#define MV88E6XXX_G2_PVT_ADDR_OP_READ 0x4000
+#define MV88E6XXX_G2_PVT_ADDR_PTR_MASK 0x01ff
+
+/* Offset 0x0C: Cross-chip Port VLAN Data Register */
+#define MV88E6XXX_G2_PVT_DATA 0x0c
+#define MV88E6XXX_G2_PVT_DATA_MASK 0x7f
+
+/* Offset 0x0D: Switch MAC/WoL/WoF Register */
+#define MV88E6XXX_G2_SWITCH_MAC 0x0d
+#define MV88E6XXX_G2_SWITCH_MAC_UPDATE 0x8000
+#define MV88E6XXX_G2_SWITCH_MAC_PTR_MASK 0x1f00
+#define MV88E6XXX_G2_SWITCH_MAC_DATA_MASK 0x00ff
+
+/* Offset 0x0E: ATU Stats Register */
+#define MV88E6XXX_G2_ATU_STATS 0x0e
+
+/* Offset 0x0F: Priority Override Table */
+#define MV88E6XXX_G2_PRIO_OVERRIDE 0x0f
+#define MV88E6XXX_G2_PRIO_OVERRIDE_UPDATE 0x8000
+#define MV88E6XXX_G2_PRIO_OVERRIDE_FPRISET 0x1000
+#define MV88E6XXX_G2_PRIO_OVERRIDE_PTR_MASK 0x0f00
+#define MV88E6352_G2_PRIO_OVERRIDE_QPRIAVBEN 0x0080
+#define MV88E6352_G2_PRIO_OVERRIDE_DATAAVB_MASK 0x0030
+#define MV88E6XXX_G2_PRIO_OVERRIDE_QFPRIEN 0x0008
+#define MV88E6XXX_G2_PRIO_OVERRIDE_DATA_MASK 0x0007
+
+/* Offset 0x14: EEPROM Command */
+#define MV88E6XXX_G2_EEPROM_CMD 0x14
+#define MV88E6XXX_G2_EEPROM_CMD_BUSY 0x8000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_MASK 0x7000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_WRITE 0x3000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_READ 0x4000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_LOAD 0x6000
+#define MV88E6XXX_G2_EEPROM_CMD_RUNNING 0x0800
+#define MV88E6XXX_G2_EEPROM_CMD_WRITE_EN 0x0400
+#define MV88E6352_G2_EEPROM_CMD_ADDR_MASK 0x00ff
+#define MV88E6390_G2_EEPROM_CMD_DATA_MASK 0x00ff
+
+/* Offset 0x15: EEPROM Data */
+#define MV88E6352_G2_EEPROM_DATA 0x15
+#define MV88E6352_G2_EEPROM_DATA_MASK 0xffff
+
+/* Offset 0x15: EEPROM Addr */
+#define MV88E6390_G2_EEPROM_ADDR 0x15
+#define MV88E6390_G2_EEPROM_ADDR_MASK 0xffff
+
+/* Offset 0x16: AVB Command Register */
+#define MV88E6352_G2_AVB_CMD 0x16
+
+/* Offset 0x17: AVB Data Register */
+#define MV88E6352_G2_AVB_DATA 0x17
+
+/* Offset 0x18: SMI PHY Command Register */
+#define MV88E6XXX_G2_SMI_PHY_CMD 0x18
+#define MV88E6XXX_G2_SMI_PHY_CMD_BUSY 0x8000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_MASK 0x6000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_INTERNAL 0x0000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_EXTERNAL 0x2000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_SETUP 0x4000
+#define MV88E6XXX_G2_SMI_PHY_CMD_MODE_MASK 0x1000
+#define MV88E6XXX_G2_SMI_PHY_CMD_MODE_45 0x0000
+#define MV88E6XXX_G2_SMI_PHY_CMD_MODE_22 0x1000
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_MASK 0x0c00
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_22_WRITE_DATA 0x0400
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_22_READ_DATA 0x0800
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_ADDR 0x0000
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_DATA 0x0400
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_READ_DATA_INC 0x0800
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_READ_DATA 0x0c00
+#define MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK 0x03e0
+#define MV88E6XXX_G2_SMI_PHY_CMD_REG_ADDR_MASK 0x001f
+#define MV88E6XXX_G2_SMI_PHY_CMD_SETUP_PTR_MASK 0x03ff
+
+/* Offset 0x19: SMI PHY Data Register */
+#define MV88E6XXX_G2_SMI_PHY_DATA 0x19
+
+/* Offset 0x1A: Scratch and Misc. Register */
+#define MV88E6XXX_G2_SCRATCH_MISC_MISC 0x1a
+#define MV88E6XXX_G2_SCRATCH_MISC_UPDATE 0x8000
+#define MV88E6XXX_G2_SCRATCH_MISC_PTR_MASK 0x7f00
+#define MV88E6XXX_G2_SCRATCH_MISC_DATA_MASK 0x00ff
+
+/* Offset 0x1B: Watch Dog Control Register */
+#define MV88E6352_G2_WDOG_CTL 0x1b
+#define MV88E6352_G2_WDOG_CTL_EGRESS_EVENT 0x0080
+#define MV88E6352_G2_WDOG_CTL_RMU_TIMEOUT 0x0040
+#define MV88E6352_G2_WDOG_CTL_QC_ENABLE 0x0020
+#define MV88E6352_G2_WDOG_CTL_EGRESS_HISTORY 0x0010
+#define MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE 0x0008
+#define MV88E6352_G2_WDOG_CTL_FORCE_IRQ 0x0004
+#define MV88E6352_G2_WDOG_CTL_HISTORY 0x0002
+#define MV88E6352_G2_WDOG_CTL_SWRESET 0x0001
+
+/* Offset 0x1B: Watch Dog Control Register */
+#define MV88E6390_G2_WDOG_CTL 0x1b
+#define MV88E6390_G2_WDOG_CTL_UPDATE 0x8000
+#define MV88E6390_G2_WDOG_CTL_PTR_MASK 0x7f00
+#define MV88E6390_G2_WDOG_CTL_PTR_INT_SOURCE 0x0000
+#define MV88E6390_G2_WDOG_CTL_PTR_INT_STS 0x1000
+#define MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE 0x1100
+#define MV88E6390_G2_WDOG_CTL_PTR_EVENT 0x1200
+#define MV88E6390_G2_WDOG_CTL_PTR_HISTORY 0x1300
+#define MV88E6390_G2_WDOG_CTL_DATA_MASK 0x00ff
+#define MV88E6390_G2_WDOG_CTL_CUT_THROUGH 0x0008
+#define MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER 0x0004
+#define MV88E6390_G2_WDOG_CTL_EGRESS 0x0002
+#define MV88E6390_G2_WDOG_CTL_FORCE_IRQ 0x0001
+
+/* Offset 0x1C: QoS Weights Register */
+#define MV88E6XXX_G2_QOS_WEIGHTS 0x1c
+#define MV88E6XXX_G2_QOS_WEIGHTS_UPDATE 0x8000
+#define MV88E6352_G2_QOS_WEIGHTS_PTR_MASK 0x3f00
+#define MV88E6390_G2_QOS_WEIGHTS_PTR_MASK 0x7f00
+#define MV88E6XXX_G2_QOS_WEIGHTS_DATA_MASK 0x00ff
+
+/* Offset 0x1D: Misc Register */
+#define MV88E6XXX_G2_MISC 0x1d
+#define MV88E6XXX_G2_MISC_5_BIT_PORT 0x4000
+#define MV88E6352_G2_NOEGR_POLICY 0x2000
+#define MV88E6390_G2_LAG_ID_4 0x2000
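
The rewritten header drops the BIT()/shift style in favour of plain 16-bit values, each name prefixed by the earliest model implementing that layout (MV88E6XXX_, MV88E6352_, MV88E6390_). A small sketch of using these masks for a 6390 watchdog pointer access, mirroring the watchdog code earlier in this patch:

	static u16 example_wdog_enable_word(void)
	{
		/* select the interrupt-enable sub-register and set Update */
		return MV88E6390_G2_WDOG_CTL_UPDATE |
		       MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE;
	}

	static u8 example_wdog_data(u16 reg)
	{
		/* only the low byte read back through the pointer is data */
		return reg & MV88E6390_G2_WDOG_CTL_DATA_MASK;
	}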
#ifdef CONFIG_NET_DSA_MV88E6XXX_GLOBAL2
@@ -127,6 +217,9 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
return 0;
}
+int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
+
int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
struct mii_bus *bus,
int addr, int reg, u16 *val);
@@ -169,6 +262,18 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
return 0;
}
+static inline int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
struct mii_bus *bus,
int addr, int reg, u16 *val)
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 73d825e08be3..a7801f6668a5 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -12,6 +12,7 @@
* (at your option) any later version.
*/
+#include <linux/bitfield.h>
#include <linux/if_bridge.h>
#include <linux/phy.h>
@@ -912,15 +913,13 @@ int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port)
}
static int mv88e6xxx_port_ieeepmt_write(struct mv88e6xxx_chip *chip,
- int port, u16 table,
- u8 pointer, u16 data)
+ int port, u16 table, u8 ptr, u16 data)
{
u16 reg;
- reg = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE |
- table |
- (pointer << MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_POINTER_SHIFT) |
- data;
+ reg = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE | table |
+ (ptr << __bf_shf(MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_PTR_MASK)) |
+ (data & MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_DATA_MASK);
return mv88e6xxx_port_write(chip, port,
MV88E6390_PORT_IEEE_PRIO_MAP_TABLE, reg);
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 8a34ea7c868a..8f3991bf1851 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -222,6 +222,7 @@
/* Offset 0x18: IEEE Priority Mapping Table */
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE 0x18
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE 0x8000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_MASK 0x7000
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP 0x0000
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP 0x1000
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP 0x2000
@@ -229,7 +230,8 @@
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_DSCP 0x5000
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_DSCP 0x6000
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_DSCP 0x7000
-#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_POINTER_SHIFT 9
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_PTR_MASK 0x0e00
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_DATA_MASK 0x01ff
/* Offset 0x18: Port IEEE Priority Remapping Registers (0-3) */
#define MV88E6095_PORT_IEEE_PRIO_REMAP_0123 0x18
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 411b4f522792..f3c01119b3d1 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -192,7 +192,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
if (err)
- return cmode;
+ return err;
switch (port) {
case 2:
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 9905b52fe293..d0c165d2086e 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -356,7 +356,8 @@ static void dummy_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
}
-static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
+static int dummy_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 5b6509d59716..305dc1996b4e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -70,6 +70,8 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_MAX_QUEUES_NUM = 2,
+ ENA_ADMIN_HW_HINTS = 3,
+
ENA_ADMIN_RSS_HASH_FUNCTION = 10,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
@@ -749,6 +751,31 @@ struct ena_admin_feature_rss_ind_table {
struct ena_admin_rss_ind_table_entry inline_entry;
};
+/* When hint value is 0, the driver should use its own predefined value */
+struct ena_admin_ena_hw_hints {
+ /* value in ms */
+ u16 mmio_read_timeout;
+
+ /* value in ms */
+ u16 driver_watchdog_timeout;
+
+	/* Per packet tx completion timeout. Value in ms */
+ u16 missing_tx_completion_timeout;
+
+ u16 missed_tx_completion_count_threshold_to_reset;
+
+ /* value in ms */
+ u16 admin_completion_tx_timeout;
+
+ u16 netdev_wd_timeout;
+
+ u16 max_tx_sgl_size;
+
+ u16 max_rx_sgl_size;
+
+ u16 reserved[8];
+};
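
A sketch of the "hint value 0 means use the driver default" convention stated above; ENA_EXAMPLE_DEFAULT_TO_MS is a made-up placeholder, not a real driver constant:

	#define ENA_EXAMPLE_DEFAULT_TO_MS 3000

	static u16 example_pick_timeout(const struct ena_admin_ena_hw_hints *hints)
	{
		/* fall back to the driver's own value when the hint is 0 */
		return hints->mmio_read_timeout ?: ENA_EXAMPLE_DEFAULT_TO_MS;
	}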
+
struct ena_admin_get_feat_cmd {
struct ena_admin_aq_common_desc aq_common_descriptor;
@@ -782,6 +809,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_feature_rss_ind_table ind_table;
struct ena_admin_feature_intr_moder_desc intr_moderation;
+
+ struct ena_admin_ena_hw_hints hw_hints;
} u;
};
@@ -857,6 +886,8 @@ enum ena_admin_aenq_notification_syndrom {
ENA_ADMIN_SUSPEND = 0,
ENA_ADMIN_RESUME = 1,
+
+ ENA_ADMIN_UPDATE_HINTS = 2,
};
struct ena_admin_aenq_entry {
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index f5b237e0bd60..52beba8c7a39 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -99,8 +99,8 @@ static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
return -EINVAL;
}
- ena_addr->mem_addr_low = (u32)addr;
- ena_addr->mem_addr_high = (u64)addr >> 32;
+ ena_addr->mem_addr_low = lower_32_bits(addr);
+ ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
return 0;
}
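
lower_32_bits()/upper_32_bits() are <linux/kernel.h> helpers; the high half is additionally narrowed to u16 because that is the width of the descriptor field. A worked example with an illustrative address:

	#include <linux/kernel.h>

	static void example_split_addr(void)
	{
		dma_addr_t addr = 0x123456789aULL;
		u32 lo = lower_32_bits(addr);		/* 0x3456789a */
		u16 hi = (u16)upper_32_bits(addr);	/* 0x0012 */

		(void)lo;
		(void)hi;
	}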
@@ -329,7 +329,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size_t size;
int dev_node = 0;
- memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+ memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
@@ -383,7 +383,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size_t size;
int prev_node = 0;
- memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
/* Use the basic completion descriptor for Rx */
io_cq->cdesc_entry_size_in_bytes =
@@ -494,7 +494,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
return -ENOMEM;
case ENA_ADMIN_UNSUPPORTED_OPCODE:
- return -EPERM;
+ return -EOPNOTSUPP;
case ENA_ADMIN_BAD_OPCODE:
case ENA_ADMIN_MALFORMED_REQUEST:
case ENA_ADMIN_ILLEGAL_PARAMETER:
@@ -511,7 +511,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
unsigned long flags, timeout;
int ret;
- timeout = jiffies + ADMIN_CMD_TIMEOUT_US;
+ timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
while (1) {
spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -561,7 +561,8 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
int ret;
wait_for_completion_timeout(&comp_ctx->wait_event,
- usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));
+ usecs_to_jiffies(
+ admin_queue->completion_timeout));
/* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors
@@ -601,12 +602,15 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
- u32 mmio_read_reg, ret;
+ u32 mmio_read_reg, ret, i;
unsigned long flags;
- int i;
+ u32 timeout = mmio_read->reg_read_to;
might_sleep();
+ if (timeout == 0)
+ timeout = ENA_REG_READ_TIMEOUT;
+
/* If readless is disabled, perform regular read */
if (!mmio_read->readless_supported)
return readl(ena_dev->reg_bar + offset);
@@ -627,14 +631,14 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
- for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
if (read_resp->req_id == mmio_read->seq_num)
break;
udelay(1);
}
- if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
+ if (unlikely(i == timeout)) {
pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num, offset, read_resp->req_id,
read_resp->reg_off);
@@ -681,7 +685,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
u8 direction;
int ret;
- memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
direction = ENA_ADMIN_SQ_DIRECTION_TX;
@@ -786,7 +790,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
pr_debug("Feature %d isn't supported\n", feature_id);
- return -EPERM;
+ return -EOPNOTSUPP;
}
memset(&get_cmd, 0x0, sizeof(get_cmd));
@@ -963,7 +967,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
u8 direction;
int ret;
- memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
@@ -1155,7 +1159,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_create_cq_resp_desc cmd_completion;
int ret;
- memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
@@ -1263,7 +1267,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
int ret;
- memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
destroy_cmd.cq_idx = io_cq->idx;
destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
@@ -1324,7 +1328,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
get_resp.u.aenq.supported_groups, groups_flag);
- return -EPERM;
+ return -EOPNOTSUPP;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -1619,8 +1623,8 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
io_sq = &ena_dev->io_sq_queues[ctx->qid];
io_cq = &ena_dev->io_cq_queues[ctx->qid];
- memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
- memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));
+ memset(io_sq, 0x0, sizeof(*io_sq));
+ memset(io_cq, 0x0, sizeof(*io_cq));
/* Init CQ */
io_cq->q_depth = ctx->queue_size;
@@ -1730,6 +1734,20 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
sizeof(get_resp.u.offload));
+	/* Driver hints isn't a mandatory admin command, so in case the
+	 * command isn't supported, set driver hints to 0
+	 */
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+
+ if (!rc)
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+ sizeof(get_resp.u.hw_hints));
+ else if (rc == -EOPNOTSUPP)
+ memset(&get_feat_ctx->hw_hints, 0x0,
+ sizeof(get_feat_ctx->hw_hints));
+ else
+ return rc;
+
return 0;
}
@@ -1807,7 +1825,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
-int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason)
{
u32 stat, timeout, cap, reset_val;
int rc;
@@ -1835,6 +1854,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
/* start reset */
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+ reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+ ENA_REGS_DEV_CTL_RESET_REASON_MASK;
writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
/* Write again the MMIO read request address */
@@ -1855,6 +1876,14 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
return rc;
}
+ timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
+ ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ if (timeout)
+ /* the resolution of timeout reg is 100ms */
+ ena_dev->admin_queue.completion_timeout = timeout * 100000;
+ else
+ ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
return 0;
}
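
Since the CAPS admin-command-timeout field is in units of 100 ms while completion_timeout is kept in microseconds, the conversion multiplies by 100000. Worked example:

	/* a CAPS timeout field of 3 means 3 x 100 ms */
	static const u32 example_completion_timeout_us = 3 * 100000; /* 300 ms */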
@@ -1909,7 +1938,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
- return -EPERM;
+ return -EOPNOTSUPP;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -1963,7 +1992,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
ENA_ADMIN_RSS_HASH_FUNCTION)) {
pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
- return -EPERM;
+ return -EOPNOTSUPP;
}
/* Validate hash function is supported */
@@ -1975,7 +2004,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
pr_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func);
- return -EPERM;
+ return -EOPNOTSUPP;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2034,7 +2063,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
pr_err("Flow hash function %d isn't supported\n", func);
- return -EPERM;
+ return -EOPNOTSUPP;
}
switch (func) {
@@ -2127,7 +2156,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
ENA_ADMIN_RSS_HASH_INPUT)) {
pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
- return -EPERM;
+ return -EOPNOTSUPP;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2208,7 +2237,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
- return -EPERM;
+ return -EOPNOTSUPP;
}
}
@@ -2286,7 +2315,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
- return -EPERM;
+ return -EOPNOTSUPP;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
@@ -2553,7 +2582,7 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION);
if (rc) {
- if (rc == -EPERM) {
+ if (rc == -EOPNOTSUPP) {
pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index c9b33ee5f258..7b784f8a06a6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -97,6 +97,8 @@
#define ENA_INTR_MODER_LEVEL_STRIDE 2
#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+
enum ena_intr_moder_level {
ENA_INTR_MODER_LOWEST = 0,
ENA_INTR_MODER_LOW,
@@ -232,7 +234,9 @@ struct ena_com_stats_admin {
struct ena_com_admin_queue {
void *q_dmadev;
spinlock_t q_lock; /* spinlock for the admin queue */
+
struct ena_comp_ctx *comp_ctx;
+ u32 completion_timeout;
u16 q_depth;
struct ena_com_admin_cq cq;
struct ena_com_admin_sq sq;
@@ -267,6 +271,7 @@ struct ena_com_aenq {
struct ena_com_mmio_read {
struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
dma_addr_t read_resp_dma_addr;
+ u32 reg_read_to; /* in us */
u16 seq_num;
bool readless_supported;
/* spin lock to ensure a single outstanding read */
@@ -336,6 +341,7 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_device_attr_feature_desc dev_attr;
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
+ struct ena_admin_ena_hw_hints hw_hints;
};
struct ena_com_create_io_ctx {
@@ -414,10 +420,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
/* ena_com_dev_reset - Perform device FLR to the device.
* @ena_dev: ENA communication layer struct
+ * @reset_reason: Specify what is the trigger for the reset in case of an error.
*
* @return - 0 on success, negative value on failure.
*/
-int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason);
/* ena_com_create_io_queue - Create io queue.
* @ena_dev: ENA communication layer struct
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index f999305e1363..b11e573ad57a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -493,6 +493,11 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
if (cdesc_phase != expected_phase)
return -EAGAIN;
+ if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
+ pr_err("Invalid req id %d\n", cdesc->req_id);
+ return -EINVAL;
+ }
+
ena_com_cq_inc_head(io_cq);
*req_id = READ_ONCE(cdesc->req_id);
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 3ee55e2fd694..b1212debc2e1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -93,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(dma_mapping_err),
ENA_STAT_RX_ENTRY(bad_desc_num),
ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+ ENA_STAT_RX_ENTRY(bad_req_id),
ENA_STAT_RX_ENTRY(empty_rx_ring),
};
@@ -539,12 +540,8 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
}
rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
- if (rc) {
- /* If device don't have permission, return unsupported */
- if (rc == -EPERM)
- rc = -EOPNOTSUPP;
+ if (rc)
return rc;
- }
cmd->data = ena_flow_hash_to_flow_type(hash_fields);
@@ -612,7 +609,7 @@ static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
rc = -EOPNOTSUPP;
}
- return (rc == -EPERM) ? -EOPNOTSUPP : rc;
+ return rc;
}
static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
@@ -638,7 +635,7 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
rc = -EOPNOTSUPP;
}
- return (rc == -EPERM) ? -EOPNOTSUPP : rc;
+ return rc;
}
static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 4f16ed38bcf3..f7dc22f65d9f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -87,6 +87,7 @@ static void ena_tx_timeout(struct net_device *dev)
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
+ adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.tx_timeout++;
u64_stats_update_end(&adapter->syncp);
@@ -303,6 +304,24 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
ena_free_tx_resources(adapter, i);
}
+static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
+{
+ if (likely(req_id < rx_ring->ring_size))
+ return 0;
+
+ netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Invalid rx req_id: %hu\n", req_id);
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bad_req_id++;
+ u64_stats_update_end(&rx_ring->syncp);
+
+ /* Trigger device reset */
+ rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
+ return -EFAULT;
+}
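
validate_rx_req_id() guards the new free_rx_ids indirection: refill takes an id at next_to_use, and the completion path returns the (possibly out-of-order) req_id at next_to_clean, so buffers no longer have to complete in ring order. A minimal sketch of the two halves, with example_ names that are not in the driver:

	static u16 example_take_id(struct ena_ring *rx_ring)
	{
		/* refill side: next buffer slot to post to the device */
		return rx_ring->free_rx_ids[rx_ring->next_to_use];
	}

	static void example_return_id(struct ena_ring *rx_ring,
				      u16 next_to_clean, u16 req_id)
	{
		/* completion side: recycle the id the device handed back */
		rx_ring->free_rx_ids[next_to_clean] = req_id;
	}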
+
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
* @adapter: network interface device structure
* @qid: queue index
@@ -314,7 +333,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
{
struct ena_ring *rx_ring = &adapter->rx_ring[qid];
struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
- int size, node;
+ int size, node, i;
if (rx_ring->rx_buffer_info) {
netif_err(adapter, ifup, adapter->netdev,
@@ -335,6 +354,20 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
return -ENOMEM;
}
+ size = sizeof(u16) * rx_ring->ring_size;
+ rx_ring->free_rx_ids = vzalloc_node(size, node);
+ if (!rx_ring->free_rx_ids) {
+ rx_ring->free_rx_ids = vzalloc(size);
+ if (!rx_ring->free_rx_ids) {
+ vfree(rx_ring->rx_buffer_info);
+ return -ENOMEM;
+ }
+ }
+
+ /* Req id ring for receiving RX pkts out of order */
+ for (i = 0; i < rx_ring->ring_size; i++)
+ rx_ring->free_rx_ids[i] = i;
+
/* Reset rx statistics */
memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
@@ -358,6 +391,9 @@ static void ena_free_rx_resources(struct ena_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
+
+ vfree(rx_ring->free_rx_ids);
+ rx_ring->free_rx_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
@@ -463,15 +499,22 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
- u16 next_to_use;
+ u16 next_to_use, req_id;
u32 i;
int rc;
next_to_use = rx_ring->next_to_use;
for (i = 0; i < num; i++) {
- struct ena_rx_buffer *rx_info =
- &rx_ring->rx_buffer_info[next_to_use];
+ struct ena_rx_buffer *rx_info;
+
+ req_id = rx_ring->free_rx_ids[next_to_use];
+ rc = validate_rx_req_id(rx_ring, req_id);
+ if (unlikely(rc < 0))
+ break;
+
+ rx_info = &rx_ring->rx_buffer_info[req_id];
+
rc = ena_alloc_rx_page(rx_ring, rx_info,
__GFP_COLD | GFP_ATOMIC | __GFP_COMP);
@@ -483,7 +526,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
}
rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
&rx_info->ena_buf,
- next_to_use);
+ req_id);
if (unlikely(rc)) {
netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
"failed to add buffer for rx queue %d\n",
@@ -670,6 +713,7 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
u64_stats_update_end(&tx_ring->syncp);
/* Trigger device reset */
+ tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
return -EFAULT;
}
@@ -781,19 +825,42 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
return tx_pkts;
}
+static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
+{
+ struct sk_buff *skb;
+
+ if (frags)
+ skb = napi_get_frags(rx_ring->napi);
+ else
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_copybreak);
+
+ if (unlikely(!skb)) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.skb_alloc_fail++;
+ u64_stats_update_end(&rx_ring->syncp);
+ netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Failed to allocate skb. frags: %d\n", frags);
+ return NULL;
+ }
+
+ return skb;
+}
+
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
struct ena_com_rx_buf_info *ena_bufs,
u32 descs,
u16 *next_to_clean)
{
struct sk_buff *skb;
- struct ena_rx_buffer *rx_info =
- &rx_ring->rx_buffer_info[*next_to_clean];
- u32 len;
- u32 buf = 0;
+ struct ena_rx_buffer *rx_info;
+ u16 len, req_id, buf = 0;
void *va;
- len = ena_bufs[0].len;
+ len = ena_bufs[buf].len;
+ req_id = ena_bufs[buf].req_id;
+ rx_info = &rx_ring->rx_buffer_info[req_id];
+
if (unlikely(!rx_info->page)) {
netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
"Page is NULL\n");
@@ -809,16 +876,9 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
prefetch(va + NET_IP_ALIGN);
if (len <= rx_ring->rx_copybreak) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_copybreak);
- if (unlikely(!skb)) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.skb_alloc_fail++;
- u64_stats_update_end(&rx_ring->syncp);
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
- "Failed to allocate skb\n");
+ skb = ena_alloc_skb(rx_ring, false);
+ if (unlikely(!skb))
return NULL;
- }
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx allocated small packet. len %d. data_len %d\n",
@@ -837,20 +897,15 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ rx_ring->free_rx_ids[*next_to_clean] = req_id;
*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
rx_ring->ring_size);
return skb;
}
- skb = napi_get_frags(rx_ring->napi);
- if (unlikely(!skb)) {
- netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "Failed allocating skb\n");
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.skb_alloc_fail++;
- u64_stats_update_end(&rx_ring->syncp);
+ skb = ena_alloc_skb(rx_ring, true);
+ if (unlikely(!skb))
return NULL;
- }
do {
dma_unmap_page(rx_ring->dev,
@@ -865,13 +920,18 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb->len, skb->data_len);
rx_info->page = NULL;
+
+ rx_ring->free_rx_ids[*next_to_clean] = req_id;
*next_to_clean =
ENA_RX_RING_IDX_NEXT(*next_to_clean,
rx_ring->ring_size);
if (likely(--descs == 0))
break;
- rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
- len = ena_bufs[++buf].len;
+
+ buf++;
+ len = ena_bufs[buf].len;
+ req_id = ena_bufs[buf].req_id;
+ rx_info = &rx_ring->rx_buffer_info[req_id];
} while (1);
return skb;
@@ -972,6 +1032,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
int rc = 0;
int total_len = 0;
int rx_copybreak_pkt = 0;
+ int i;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"%s qid %d\n", __func__, rx_ring->qid);
@@ -1001,9 +1062,13 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* exit if we failed to retrieve a buffer */
if (unlikely(!skb)) {
- next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
- ena_rx_ctx.descs,
- rx_ring->ring_size);
+ for (i = 0; i < ena_rx_ctx.descs; i++) {
+ rx_ring->free_rx_ids[next_to_clean] =
+ rx_ring->ena_bufs[i].req_id;
+ next_to_clean =
+ ENA_RX_RING_IDX_NEXT(next_to_clean,
+ rx_ring->ring_size);
+ }
break;
}
@@ -1055,6 +1120,7 @@ error:
u64_stats_update_end(&rx_ring->syncp);
/* Too many desc from the device. Trigger reset */
+ adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return 0;
@@ -1208,14 +1274,25 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
struct ena_napi *ena_napi = data;
- napi_schedule(&ena_napi->napi);
+ napi_schedule_irqoff(&ena_napi->napi);
return IRQ_HANDLED;
}
+/* Reserve a single MSI-X vector for management (admin + AENQ),
+ * plus one vector for each potential I/O queue.
+ * The number of potential I/O queues is the minimum of what the device
+ * supports and the number of vCPUs.
+ */
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
- int msix_vecs, rc;
+ int msix_vecs, irq_cnt;
+
+ if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Error, MSI-X is already enabled\n");
+ return -EPERM;
+ }
/* Reserve the max MSI-X vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
@@ -1223,25 +1300,28 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);
- rc = pci_alloc_irq_vectors(adapter->pdev, msix_vecs, msix_vecs,
- PCI_IRQ_MSIX);
- if (rc < 0) {
+ irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
+ msix_vecs, PCI_IRQ_MSIX);
+
+ if (irq_cnt < 0) {
netif_err(adapter, probe, adapter->netdev,
- "Failed to enable MSI-X, vectors %d rc %d\n",
- msix_vecs, rc);
+ "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
return -ENOSPC;
}
- netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n",
- msix_vecs);
-
- if (msix_vecs >= 1) {
- if (ena_init_rx_cpu_rmap(adapter))
- netif_warn(adapter, probe, adapter->netdev,
- "Failed to map IRQs to CPUs\n");
+ if (irq_cnt != msix_vecs) {
+ netif_notice(adapter, probe, adapter->netdev,
+ "enable only %d MSI-X (out of %d), reduce the number of queues\n",
+ irq_cnt, msix_vecs);
+ adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
- adapter->msix_vecs = msix_vecs;
+ if (ena_init_rx_cpu_rmap(adapter))
+ netif_warn(adapter, probe, adapter->netdev,
+ "Failed to map IRQs to CPUs\n");
+
+ adapter->msix_vecs = irq_cnt;
+ set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
return 0;
}
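A minimal sketch of the vector budget computed above: one management vector plus one per I/O queue, trimmed to whatever pci_alloc_irq_vectors() actually grants. plan_msix() is a hypothetical helper; only the two ENA_*_MSIX_VEC values mirror the driver's defines.

#include <stdio.h>

#define ENA_ADMIN_MSIX_VEC 1                    /* admin + AENQ share one vector */
#define ENA_MIN_MSIX_VEC   2                    /* admin plus at least one I/O vector */

static int plan_msix(int num_queues, int granted, int *queues_out)
{
        int requested = ENA_ADMIN_MSIX_VEC + num_queues;

        if (granted < ENA_MIN_MSIX_VEC)
                return -1;                      /* analogous to the -ENOSPC path */
        if (granted > requested)
                granted = requested;

        /* whatever remains after the admin vector backs the I/O queues */
        *queues_out = granted - ENA_ADMIN_MSIX_VEC;
        return granted;
}

int main(void)
{
        int queues;
        int vecs = plan_msix(8, 5, &queues);    /* device granted only 5 of 9 */

        printf("vectors=%d queues=%d\n", vecs, queues); /* vectors=5 queues=4 */
        return 0;
}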
@@ -1318,6 +1398,12 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int rc = 0, i, k;
+ if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to request I/O IRQ: MSI-X is not enabled\n");
+ return -EINVAL;
+ }
+
for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
irq = &adapter->irq_tbl[i];
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
@@ -1376,6 +1462,12 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
}
}
+static void ena_disable_msix(struct ena_adapter *adapter)
+{
+ if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
+ pci_free_irq_vectors(adapter->pdev);
+}
+
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
int i;
@@ -1446,7 +1538,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
/* In case the RSS table wasn't initialized by probe */
if (!ena_dev->rss.tbl_log_size) {
rc = ena_rss_init_default(adapter);
- if (rc && (rc != -EPERM)) {
+ if (rc && (rc != -EOPNOTSUPP)) {
netif_err(adapter, ifup, adapter->netdev,
"Failed to init RSS rc: %d\n", rc);
return rc;
@@ -1455,17 +1547,17 @@ static int ena_rss_configure(struct ena_adapter *adapter)
/* Set indirect table */
rc = ena_com_indirect_table_set(ena_dev);
- if (unlikely(rc && rc != -EPERM))
+ if (unlikely(rc && rc != -EOPNOTSUPP))
return rc;
/* Configure hash function (if supported) */
rc = ena_com_set_hash_function(ena_dev);
- if (unlikely(rc && (rc != -EPERM)))
+ if (unlikely(rc && (rc != -EOPNOTSUPP)))
return rc;
/* Configure hash inputs (if supported) */
rc = ena_com_set_hash_ctrl(ena_dev);
- if (unlikely(rc && (rc != -EPERM)))
+ if (unlikely(rc && (rc != -EOPNOTSUPP)))
return rc;
return 0;
@@ -1720,7 +1812,7 @@ static void ena_down(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
int rc;
- rc = ena_com_dev_reset(adapter->ena_dev);
+ rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
if (rc)
dev_err(&adapter->pdev->dev, "Device reset failed\n");
}
@@ -2144,7 +2236,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
- if (rc == -EPERM)
+ if (rc == -EOPNOTSUPP)
pr_warn("Cannot set host attributes\n");
else
pr_err("Cannot set host attributes\n");
@@ -2181,7 +2273,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(adapter->ena_dev);
if (rc) {
- if (rc == -EPERM)
+ if (rc == -EOPNOTSUPP)
netif_warn(adapter, drv, adapter->netdev,
"Cannot set host attributes\n");
else
@@ -2353,7 +2445,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
ena_com_set_mmio_read_mode(ena_dev, readless_supported);
- rc = ena_com_dev_reset(ena_dev);
+ rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
if (rc) {
dev_err(dev, "Can not reset device\n");
goto err_mmio_read_less;
@@ -2464,7 +2556,8 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
return 0;
err_disable_msix:
- pci_free_irq_vectors(adapter->pdev);
+ ena_disable_msix(adapter);
+
return rc;
}
@@ -2502,7 +2595,7 @@ static void ena_fw_reset_device(struct work_struct *work)
ena_free_mgmnt_irq(adapter);
- pci_free_irq_vectors(adapter->pdev);
+ ena_disable_msix(adapter);
ena_com_abort_admin_commands(ena_dev);
@@ -2512,6 +2605,7 @@ static void ena_fw_reset_device(struct work_struct *work)
ena_com_mmio_reg_read_request_destroy(ena_dev);
+ adapter->reset_reason = ENA_REGS_RESET_NORMAL;
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
/* Finish with the destroy part. Start the init part */
@@ -2553,7 +2647,7 @@ static void ena_fw_reset_device(struct work_struct *work)
return;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
- pci_free_irq_vectors(adapter->pdev);
+ ena_disable_msix(adapter);
err_device_destroy:
ena_com_admin_destroy(ena_dev);
err:
@@ -2577,7 +2671,7 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies;
if (unlikely(last_jiffies &&
- time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
+ time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
if (!tx_buf->print_once)
netif_notice(adapter, tx_err, adapter->netdev,
"Found a Tx that wasn't completed on time, qid %d, index %d.\n",
@@ -2586,10 +2680,13 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
tx_buf->print_once = 1;
missed_tx++;
- if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
+ if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
netif_err(adapter, tx_err, adapter->netdev,
"The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
- missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
+ missed_tx,
+ adapter->missing_tx_completion_threshold);
+ adapter->reset_reason =
+ ENA_REGS_RESET_MISS_TX_CMPL;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return -EIO;
}
@@ -2613,6 +2710,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
+ if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
budget = ENA_MONITORED_TX_QUEUES;
for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
@@ -2690,14 +2790,18 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
if (!adapter->wd_state)
return;
- keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies
- + ENA_DEVICE_KALIVE_TIMEOUT);
+ if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+ keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
+ adapter->keep_alive_timeout);
if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
netif_err(adapter, drv, adapter->netdev,
"Keep alive watchdog timeout.\n");
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.wd_expired++;
u64_stats_update_end(&adapter->syncp);
+ adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
}
@@ -2710,10 +2814,49 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.admin_q_pause++;
u64_stats_update_end(&adapter->syncp);
+ adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
}
+static void ena_update_hints(struct ena_adapter *adapter,
+ struct ena_admin_ena_hw_hints *hints)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (hints->admin_completion_tx_timeout)
+ adapter->ena_dev->admin_queue.completion_timeout =
+ hints->admin_completion_tx_timeout * 1000;
+
+ if (hints->mmio_read_timeout)
+ /* convert to usec */
+ adapter->ena_dev->mmio_read.reg_read_to =
+ hints->mmio_read_timeout * 1000;
+
+ if (hints->missed_tx_completion_count_threshold_to_reset)
+ adapter->missing_tx_completion_threshold =
+ hints->missed_tx_completion_count_threshold_to_reset;
+
+ if (hints->missing_tx_completion_timeout) {
+ if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
+ else
+ adapter->missing_tx_completion_to =
+ msecs_to_jiffies(hints->missing_tx_completion_timeout);
+ }
+
+ if (hints->netdev_wd_timeout)
+ netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
+
+ if (hints->driver_watchdog_timeout) {
+ if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
+ else
+ adapter->keep_alive_timeout =
+ msecs_to_jiffies(hints->driver_watchdog_timeout);
+ }
+}
+
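A minimal sketch of the hint handling above: each hint arrives in milliseconds and is converted to jiffies, except the ENA_HW_HINTS_NO_TIMEOUT sentinel, which is stored as-is so the watchdogs can skip their checks. The sentinel value and HZ below are assumptions made for this illustration.

#include <stdio.h>

#define HZ 250                                  /* assumption for this sketch */
#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF          /* sentinel, assumed value */

static unsigned long apply_timeout_hint(unsigned int hint_ms)
{
        if (hint_ms == ENA_HW_HINTS_NO_TIMEOUT)
                return ENA_HW_HINTS_NO_TIMEOUT; /* watchdogs skip this value */

        return (unsigned long)hint_ms * HZ / 1000; /* msecs_to_jiffies(), roughly */
}

int main(void)
{
        printf("%lu\n", apply_timeout_hint(4000));      /* 1000 jiffies at HZ=250 */
        printf("%#lx\n", apply_timeout_hint(ENA_HW_HINTS_NO_TIMEOUT));
        return 0;
}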
static void ena_update_host_info(struct ena_admin_host_info *host_info,
struct net_device *netdev)
{
@@ -2886,7 +3029,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = ethtool_rxfh_indir_default(i, adapter->num_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
- if (unlikely(rc && (rc != -EPERM))) {
+ if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill indirect table\n");
goto err_fill_indir;
}
@@ -2894,13 +3037,13 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
- if (unlikely(rc && (rc != -EPERM))) {
+ if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n");
goto err_fill_indir;
}
rc = ena_com_set_default_hash_ctrl(ena_dev);
- if (unlikely(rc && (rc != -EPERM))) {
+ if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash control\n");
goto err_fill_indir;
}
@@ -3076,6 +3219,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_set_conf_feat_params(adapter, &get_feat_ctx);
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ adapter->reset_reason = ENA_REGS_RESET_NORMAL;
adapter->tx_ring_size = queue_size;
adapter->rx_ring_size = queue_size;
@@ -3114,7 +3258,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_worker_destroy;
}
rc = ena_rss_init_default(adapter);
- if (rc && (rc != -EPERM)) {
+ if (rc && (rc != -EOPNOTSUPP)) {
dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
goto err_free_msix;
}
@@ -3136,6 +3280,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
adapter->last_keep_alive_jiffies = jiffies;
+ adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
+ adapter->missing_tx_completion_to = TX_TIMEOUT;
+ adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
+
+ ena_update_hints(adapter, &get_feat_ctx.hw_hints);
setup_timer(&adapter->timer_service, ena_timer_service,
(unsigned long)adapter);
@@ -3155,9 +3304,9 @@ err_rss:
ena_com_delete_debug_area(ena_dev);
ena_com_rss_destroy(ena_dev);
err_free_msix:
- ena_com_dev_reset(ena_dev);
+ ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
ena_free_mgmnt_irq(adapter);
- pci_free_irq_vectors(adapter->pdev);
+ ena_disable_msix(adapter);
err_worker_destroy:
ena_com_destroy_interrupt_moderation(ena_dev);
del_timer(&adapter->timer_service);
@@ -3238,11 +3387,11 @@ static void ena_remove(struct pci_dev *pdev)
/* Reset the device only if the device is running. */
if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
- ena_com_dev_reset(ena_dev);
+ ena_com_dev_reset(ena_dev, adapter->reset_reason);
ena_free_mgmnt_irq(adapter);
- pci_free_irq_vectors(adapter->pdev);
+ ena_disable_msix(adapter);
free_netdev(netdev);
@@ -3329,14 +3478,24 @@ static void ena_keep_alive_wd(void *adapter_data,
struct ena_admin_aenq_entry *aenq_e)
{
struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+ struct ena_admin_aenq_keep_alive_desc *desc;
+ u64 rx_drops;
+ desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
adapter->last_keep_alive_jiffies = jiffies;
+
+ rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.rx_drops = rx_drops;
+ u64_stats_update_end(&adapter->syncp);
}
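The keep-alive handler above rebuilds a 64-bit drop counter from two 32-bit descriptor words; a standalone sketch of just that recombination:

#include <stdio.h>
#include <stdint.h>

static uint64_t combine_drops(uint32_t high, uint32_t low)
{
        /* the cast is essential: a 32-bit "high << 32" would be undefined */
        return ((uint64_t)high << 32) | low;
}

int main(void)
{
        printf("%#llx\n",
               (unsigned long long)combine_drops(0x1, 0x2));    /* 0x100000002 */
        return 0;
}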
static void ena_notification(void *adapter_data,
struct ena_admin_aenq_entry *aenq_e)
{
struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+ struct ena_admin_ena_hw_hints *hints;
WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
"Invalid group(%x) expected %x\n",
@@ -3354,6 +3513,11 @@ static void ena_notification(void *adapter_data,
case ENA_ADMIN_RESUME:
queue_work(ena_wq, &adapter->resume_io_task);
break;
+ case ENA_ADMIN_UPDATE_HINTS:
+ hints = (struct ena_admin_ena_hw_hints *)
+ (&aenq_e->inline_data_w4);
+ ena_update_hints(adapter, hints);
+ break;
default:
netif_err(adapter, drv, adapter->netdev,
"Invalid aenq notification link state %d\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index a4d3d5e21068..29bb5704260b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,21 +44,24 @@
#include "ena_eth_com.h"
#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 1
-#define DRV_MODULE_VER_SUBMINOR 7
+#define DRV_MODULE_VER_MINOR 2
+#define DRV_MODULE_VER_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION
#define DRV_MODULE_VERSION \
__stringify(DRV_MODULE_VER_MAJOR) "." \
__stringify(DRV_MODULE_VER_MINOR) "." \
- __stringify(DRV_MODULE_VER_SUBMINOR)
+ __stringify(DRV_MODULE_VER_SUBMINOR) "k"
#endif
#define DEVICE_NAME "Elastic Network Adapter (ENA)"
/* 1 for AENQ + ADMIN */
-#define ENA_MAX_MSIX_VEC(io_queues) (1 + (io_queues))
+#define ENA_ADMIN_MSIX_VEC 1
+#define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues))
+
+#define ENA_MIN_MSIX_VEC 2
#define ENA_REG_BAR 0
#define ENA_MEM_BAR 2
@@ -194,12 +197,19 @@ struct ena_stats_rx {
u64 dma_mapping_err;
u64 bad_desc_num;
u64 rx_copybreak_pkt;
+ u64 bad_req_id;
u64 empty_rx_ring;
};
struct ena_ring {
- /* Holds the empty requests for TX out of order completions */
- u16 *free_tx_ids;
+ union {
+ /* Holds the empty requests for TX/RX
+ * out of order completions
+ */
+ u16 *free_tx_ids;
+ u16 *free_rx_ids;
+ };
+
union {
struct ena_tx_buffer *tx_buffer_info;
struct ena_rx_buffer *rx_buffer_info;
@@ -260,6 +270,7 @@ enum ena_flags_t {
ENA_FLAG_DEVICE_RUNNING,
ENA_FLAG_DEV_UP,
ENA_FLAG_LINK_UP,
+ ENA_FLAG_MSIX_ENABLED,
ENA_FLAG_TRIGGER_RESET
};
@@ -280,6 +291,8 @@ struct ena_adapter {
int msix_vecs;
+ u32 missing_tx_completion_threshold;
+
u32 tx_usecs, rx_usecs; /* interrupt moderation */
u32 tx_frames, rx_frames; /* interrupt moderation */
@@ -293,6 +306,9 @@ struct ena_adapter {
u8 mac_addr[ETH_ALEN];
+ unsigned long keep_alive_timeout;
+ unsigned long missing_tx_completion_to;
+
char name[ENA_NAME_MAX_LEN];
unsigned long flags;
@@ -322,6 +338,8 @@ struct ena_adapter {
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
+
+ enum ena_regs_reset_reason_types reset_reason;
};
void ena_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 26097a2b6030..9aec43c5bba8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -32,6 +32,36 @@
#ifndef _ENA_REGS_H_
#define _ENA_REGS_H_
+enum ena_regs_reset_reason_types {
+ ENA_REGS_RESET_NORMAL = 0,
+
+ ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
+
+ ENA_REGS_RESET_ADMIN_TO = 2,
+
+ ENA_REGS_RESET_MISS_TX_CMPL = 3,
+
+ ENA_REGS_RESET_INV_RX_REQ_ID = 4,
+
+ ENA_REGS_RESET_INV_TX_REQ_ID = 5,
+
+ ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
+
+ ENA_REGS_RESET_INIT_ERR = 7,
+
+ ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
+
+ ENA_REGS_RESET_OS_TRIGGER = 9,
+
+ ENA_REGS_RESET_OS_NETDEV_WD = 10,
+
+ ENA_REGS_RESET_SHUTDOWN = 11,
+
+ ENA_REGS_RESET_USER_TRIGGER = 12,
+
+ ENA_REGS_RESET_GENERIC = 13,
+};
+
/* ena_registers offsets */
#define ENA_REGS_VERSION_OFF 0x0
#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
@@ -78,6 +108,8 @@
#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */
#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
@@ -102,6 +134,8 @@
#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
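A sketch of how a reset reason lands in the dev_ctl register via the shift/mask pair above. In the driver this packing happens inside ena_com_dev_reset(), so pack_reset_reason() here is hypothetical:

#include <stdio.h>
#include <stdint.h>

#define RESET_REASON_SHIFT 28
#define RESET_REASON_MASK  0xf0000000u

static uint32_t pack_reset_reason(uint32_t dev_ctl, uint32_t reason)
{
        dev_ctl &= ~RESET_REASON_MASK;
        dev_ctl |= (reason << RESET_REASON_SHIFT) & RESET_REASON_MASK;
        return dev_ctl;
}

int main(void)
{
        /* ENA_REGS_RESET_KEEP_ALIVE_TO == 1 lands in the top nibble */
        printf("%#010x\n", pack_reset_reason(0, 1));    /* 0x10000000 */
        return 0;
}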
/* dev_sts register */
#define ENA_REGS_DEV_STS_READY_MASK 0x1
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 127adbeefb10..9795419aac2d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -123,38 +123,13 @@
#define DMA_ISR 0x3008
#define DMA_AXIARCR 0x3010
#define DMA_AXIAWCR 0x3018
+#define DMA_AXIAWARCR 0x301c
#define DMA_DSR0 0x3020
#define DMA_DSR1 0x3024
+#define DMA_TXEDMACR 0x3040
+#define DMA_RXEDMACR 0x3044
/* DMA register entry bit positions and sizes */
-#define DMA_AXIARCR_DRC_INDEX 0
-#define DMA_AXIARCR_DRC_WIDTH 4
-#define DMA_AXIARCR_DRD_INDEX 4
-#define DMA_AXIARCR_DRD_WIDTH 2
-#define DMA_AXIARCR_TEC_INDEX 8
-#define DMA_AXIARCR_TEC_WIDTH 4
-#define DMA_AXIARCR_TED_INDEX 12
-#define DMA_AXIARCR_TED_WIDTH 2
-#define DMA_AXIARCR_THC_INDEX 16
-#define DMA_AXIARCR_THC_WIDTH 4
-#define DMA_AXIARCR_THD_INDEX 20
-#define DMA_AXIARCR_THD_WIDTH 2
-#define DMA_AXIAWCR_DWC_INDEX 0
-#define DMA_AXIAWCR_DWC_WIDTH 4
-#define DMA_AXIAWCR_DWD_INDEX 4
-#define DMA_AXIAWCR_DWD_WIDTH 2
-#define DMA_AXIAWCR_RPC_INDEX 8
-#define DMA_AXIAWCR_RPC_WIDTH 4
-#define DMA_AXIAWCR_RPD_INDEX 12
-#define DMA_AXIAWCR_RPD_WIDTH 2
-#define DMA_AXIAWCR_RHC_INDEX 16
-#define DMA_AXIAWCR_RHC_WIDTH 4
-#define DMA_AXIAWCR_RHD_INDEX 20
-#define DMA_AXIAWCR_RHD_WIDTH 2
-#define DMA_AXIAWCR_TDC_INDEX 24
-#define DMA_AXIAWCR_TDC_WIDTH 4
-#define DMA_AXIAWCR_TDD_INDEX 28
-#define DMA_AXIAWCR_TDD_WIDTH 2
#define DMA_ISR_MACIS_INDEX 17
#define DMA_ISR_MACIS_WIDTH 1
#define DMA_ISR_MTLIS_INDEX 16
@@ -163,14 +138,31 @@
#define DMA_MR_INTM_WIDTH 2
#define DMA_MR_SWR_INDEX 0
#define DMA_MR_SWR_WIDTH 1
+#define DMA_RXEDMACR_RDPS_INDEX 0
+#define DMA_RXEDMACR_RDPS_WIDTH 3
+#define DMA_SBMR_AAL_INDEX 12
+#define DMA_SBMR_AAL_WIDTH 1
#define DMA_SBMR_EAME_INDEX 11
#define DMA_SBMR_EAME_WIDTH 1
-#define DMA_SBMR_BLEN_256_INDEX 7
-#define DMA_SBMR_BLEN_256_WIDTH 1
+#define DMA_SBMR_BLEN_INDEX 1
+#define DMA_SBMR_BLEN_WIDTH 7
+#define DMA_SBMR_RD_OSR_LMT_INDEX 16
+#define DMA_SBMR_RD_OSR_LMT_WIDTH 6
#define DMA_SBMR_UNDEF_INDEX 0
#define DMA_SBMR_UNDEF_WIDTH 1
+#define DMA_SBMR_WR_OSR_LMT_INDEX 24
+#define DMA_SBMR_WR_OSR_LMT_WIDTH 6
+#define DMA_TXEDMACR_TDPS_INDEX 0
+#define DMA_TXEDMACR_TDPS_WIDTH 3
/* DMA register values */
+#define DMA_SBMR_BLEN_256 256
+#define DMA_SBMR_BLEN_128 128
+#define DMA_SBMR_BLEN_64 64
+#define DMA_SBMR_BLEN_32 32
+#define DMA_SBMR_BLEN_16 16
+#define DMA_SBMR_BLEN_8 8
+#define DMA_SBMR_BLEN_4 4
#define DMA_DSR_RPS_WIDTH 4
#define DMA_DSR_TPS_WIDTH 4
#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
@@ -959,6 +951,7 @@
#define XP_DRIVER_INT_RO 0x0064
#define XP_DRIVER_SCRATCH_0 0x0068
#define XP_DRIVER_SCRATCH_1 0x006c
+#define XP_INT_REISSUE_EN 0x0074
#define XP_INT_EN 0x0078
#define XP_I2C_MUTEX 0x0080
#define XP_MDIO_MUTEX 0x0084
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 0a98c369df20..45d92304068e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -176,8 +176,8 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_free_ring_resources\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
xgbe_free_ring(pdata, channel->tx_ring);
xgbe_free_ring(pdata, channel->rx_ring);
}
@@ -185,34 +185,60 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_ring_resources\n");
}
+static void *xgbe_alloc_node(size_t size, int node)
+{
+ void *mem;
+
+ mem = kzalloc_node(size, GFP_KERNEL, node);
+ if (!mem)
+ mem = kzalloc(size, GFP_KERNEL);
+
+ return mem;
+}
+
+static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
+ dma_addr_t *dma, int node)
+{
+ void *mem;
+ int cur_node = dev_to_node(dev);
+
+ set_dev_node(dev, node);
+ mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
+ set_dev_node(dev, cur_node);
+
+ if (!mem)
+ mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
+
+ return mem;
+}
+
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring, unsigned int rdesc_count)
{
- DBGPR("-->xgbe_init_ring\n");
+ size_t size;
if (!ring)
return 0;
/* Descriptors */
+ size = rdesc_count * sizeof(struct xgbe_ring_desc);
+
ring->rdesc_count = rdesc_count;
- ring->rdesc = dma_alloc_coherent(pdata->dev,
- (sizeof(struct xgbe_ring_desc) *
- rdesc_count), &ring->rdesc_dma,
- GFP_KERNEL);
+ ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
+ ring->node);
if (!ring->rdesc)
return -ENOMEM;
/* Descriptor information */
- ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
- GFP_KERNEL);
+ size = rdesc_count * sizeof(struct xgbe_ring_data);
+
+ ring->rdata = xgbe_alloc_node(size, ring->node);
if (!ring->rdata)
return -ENOMEM;
netif_dbg(pdata, drv, pdata->netdev,
- "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
- ring->rdesc, &ring->rdesc_dma, ring->rdata);
-
- DBGPR("<--xgbe_init_ring\n");
+ "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
+ ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);
return 0;
}
@@ -223,10 +249,8 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
- DBGPR("-->xgbe_alloc_ring_resources\n");
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
channel->name);
@@ -250,8 +274,6 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
}
}
- DBGPR("<--xgbe_alloc_ring_resources\n");
-
return 0;
err_ring:
@@ -261,21 +283,33 @@ err_ring:
}
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
- struct xgbe_page_alloc *pa, gfp_t gfp, int order)
+ struct xgbe_page_alloc *pa, int alloc_order,
+ int node)
{
struct page *pages = NULL;
dma_addr_t pages_dma;
- int ret;
+ gfp_t gfp;
+ int order, ret;
+
+again:
+ order = alloc_order;
/* Try to obtain pages, decreasing order if necessary */
- gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
while (order >= 0) {
- pages = alloc_pages(gfp, order);
+ pages = alloc_pages_node(node, gfp, order);
if (pages)
break;
order--;
}
+
+ /* If we couldn't get local pages, try getting from anywhere */
+ if (!pages && (node != NUMA_NO_NODE)) {
+ node = NUMA_NO_NODE;
+ goto again;
+ }
+
if (!pages)
return -ENOMEM;
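A minimal model of the two-level fallback in xgbe_alloc_pages() above: first reduce the page order, then give up on the preferred NUMA node. try_alloc() is a hypothetical primitive standing in for alloc_pages_node():

#include <stdio.h>
#include <stdlib.h>

#define NUMA_NO_NODE (-1)

/* hypothetical allocator: pretend only order-0 anywhere succeeds */
static void *try_alloc(int order, int node)
{
        if (order == 0 && node == NUMA_NO_NODE)
                return malloc(4096);
        return NULL;
}

static void *alloc_pages_fallback(int alloc_order, int node)
{
        void *pages;
        int order;

again:
        for (order = alloc_order; order >= 0; order--) {
                pages = try_alloc(order, node);
                if (pages)
                        return pages;
        }

        if (node != NUMA_NO_NODE) {             /* no local pages: go anywhere */
                node = NUMA_NO_NODE;
                goto again;
        }
        return NULL;
}

int main(void)
{
        void *p = alloc_pages_fallback(3, 0);

        printf("%s\n", p ? "allocated" : "failed");
        free(p);
        return 0;
}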
@@ -327,14 +361,14 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
int ret;
if (!ring->rx_hdr_pa.pages) {
- ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
if (ret)
return ret;
}
if (!ring->rx_buf_pa.pages) {
- ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
- PAGE_ALLOC_COSTLY_ORDER);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
+ PAGE_ALLOC_COSTLY_ORDER, ring->node);
if (ret)
return ret;
}
@@ -362,8 +396,8 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
ring = channel->tx_ring;
if (!ring)
break;
@@ -403,8 +437,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
ring = channel->rx_ring;
if (!ring)
break;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 24a687ce4388..06f953e1e9b2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -174,58 +174,30 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
return ret;
}
-static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
+static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
+ unsigned int pblx8, pbl;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
- pdata->pblx8);
-
- return 0;
-}
-
-static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
-{
- return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
-}
-
-static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel;
- unsigned int i;
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
+ pblx8 = DMA_PBL_X8_DISABLE;
+ pbl = pdata->pbl;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
- pdata->tx_pbl);
+ if (pdata->pbl > 32) {
+ pblx8 = DMA_PBL_X8_ENABLE;
+ pbl >>= 3;
}
- return 0;
-}
-
-static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
-{
- return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
-}
-
-static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel;
- unsigned int i;
+ for (i = 0; i < pdata->channel_count; i++) {
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
+ pblx8);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
- break;
+ if (pdata->channel[i]->tx_ring)
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
+ PBL, pbl);
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
- pdata->rx_pbl);
+ if (pdata->channel[i]->rx_ring)
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
+ PBL, pbl);
}
return 0;
@@ -233,15 +205,13 @@ static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
pdata->tx_osp_mode);
}
@@ -292,15 +262,13 @@ static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
pdata->rx_riwt);
}
@@ -314,44 +282,38 @@ static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
pdata->rx_buf_size);
}
}
static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
}
}
static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
}
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
@@ -651,8 +613,9 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
pdata->channel_irq_mode);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+
/* Clear all the interrupts which are set */
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
@@ -1497,26 +1460,37 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
unsigned int addend)
{
+ unsigned int count = 10000;
+
/* Set the addend register value and tell the device */
XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
/* Wait for addend update to complete */
- while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating timestamp addend register\n");
}
static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
unsigned int nsec)
{
+ unsigned int count = 10000;
+
/* Set the time values and tell the device */
XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
/* Wait for time update to complete */
- while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev, "timed out initializing timestamp\n");
}
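Both timestamp helpers above now poll with a retry budget instead of spinning forever; the pattern, reduced to a standalone sketch (read_busy_bit() is a stand-in for XGMAC_IOREAD_BITS):

#include <stdio.h>
#include <stdbool.h>

static int polls_left = 3;                      /* pretend hardware clears on the 3rd poll */

static bool read_busy_bit(void)
{
        return --polls_left > 0;
}

static int wait_bit_clear(unsigned int budget)
{
        while (--budget && read_busy_bit())
                ;                               /* the driver udelay(5)s here */

        return budget ? 0 : -1;                 /* -1 == timed out, driver logs an error */
}

int main(void)
{
        printf("%s\n", wait_bit_clear(10000) ? "timeout" : "done");
        return 0;
}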
static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
@@ -2140,37 +2114,38 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
+ unsigned int sbmr;
+
+ sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);
+
/* Set enhanced addressing mode */
- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);
/* Set the System Bus mode */
- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);
+
+ XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);
+
+ /* Set descriptor fetching threshold */
+ if (pdata->vdata->tx_desc_prefetch)
+ XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
+ pdata->vdata->tx_desc_prefetch);
+
+ if (pdata->vdata->rx_desc_prefetch)
+ XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
+ pdata->vdata->rx_desc_prefetch);
}
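A sketch of the batched read-modify-write xgbe_config_dma_bus() switches to: several DMA_SBMR fields are set in a local copy and written back once. SET_FIELD is a generic stand-in for XGMAC_SET_BITS; the index/width pairs mirror the defines added in xgbe-common.h.

#include <stdio.h>
#include <stdint.h>

#define SET_FIELD(reg, index, width, val) \
        ((reg) = ((reg) & ~(((1u << (width)) - 1) << (index))) | \
                 (((uint32_t)(val) & ((1u << (width)) - 1)) << (index)))

int main(void)
{
        uint32_t sbmr = 0;

        SET_FIELD(sbmr, 11, 1, 1);              /* EAME: enhanced addressing */
        SET_FIELD(sbmr, 1, 7, 64 >> 2);         /* BLEN: burst length 64, encoded as blen >> 2 */
        SET_FIELD(sbmr, 12, 1, 1);              /* AAL: address-aligned beats */
        SET_FIELD(sbmr, 16, 6, 8 - 1);          /* RD_OSR_LMT: 8 outstanding reads */
        SET_FIELD(sbmr, 24, 6, 8 - 1);          /* WR_OSR_LMT: 8 outstanding writes */

        printf("SBMR=%#010x\n", sbmr);          /* one register write replaces five */
        return 0;
}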
static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
- unsigned int arcache, awcache;
-
- arcache = 0;
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
- XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
-
- awcache = 0;
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
- XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+ XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
+ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
+ if (pdata->awarcr)
+ XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
}
static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
@@ -3202,16 +3177,14 @@ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Enable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
}
/* Enable each Tx queue */
@@ -3225,7 +3198,6 @@ static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Prepare for Tx DMA channel stop */
@@ -3240,12 +3212,11 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
/* Disable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
}
}
@@ -3277,16 +3248,14 @@ static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int reg_val, i;
/* Enable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
}
/* Enable each Rx queue */
@@ -3304,7 +3273,6 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Disable MAC Rx */
@@ -3321,27 +3289,24 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
/* Disable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
}
}
static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Enable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
}
/* Enable MAC Tx */
@@ -3350,7 +3315,6 @@ static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Prepare for Tx DMA channel stop */
@@ -3361,42 +3325,37 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
/* Disable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
}
}
static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Enable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
}
}
static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Disable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
}
}
@@ -3420,9 +3379,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_dma_bus(pdata);
xgbe_config_dma_cache(pdata);
xgbe_config_osp_mode(pdata);
- xgbe_config_pblx8(pdata);
- xgbe_config_tx_pbl_val(pdata);
- xgbe_config_rx_pbl_val(pdata);
+ xgbe_config_pbl_val(pdata);
xgbe_config_rx_coalesce(pdata);
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
@@ -3550,13 +3507,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
/* For TX DMA Operating on Second Frame config */
hw_if->config_osp_mode = xgbe_config_osp_mode;
- /* For RX and TX PBL config */
- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
- hw_if->config_pblx8 = xgbe_config_pblx8;
-
/* For MMC statistics support */
hw_if->tx_mmc_int = xgbe_tx_mmc_int;
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a934bd5d0507..ecef3ee87b17 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -158,81 +158,106 @@ static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);
+static void *xgbe_alloc_node(size_t size, int node)
+{
+ void *mem;
+
+ mem = kzalloc_node(size, GFP_KERNEL, node);
+ if (!mem)
+ mem = kzalloc(size, GFP_KERNEL);
+
+ return mem;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
+ if (!pdata->channel[i])
+ continue;
+
+ kfree(pdata->channel[i]->rx_ring);
+ kfree(pdata->channel[i]->tx_ring);
+ kfree(pdata->channel[i]);
+
+ pdata->channel[i] = NULL;
+ }
+
+ pdata->channel_count = 0;
+}
+
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel_mem, *channel;
- struct xgbe_ring *tx_ring, *rx_ring;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
unsigned int count, i;
- int ret = -ENOMEM;
+ unsigned int cpu;
+ int node;
count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+ for (i = 0; i < count; i++) {
+ /* Attempt to use a CPU on the node the device is on */
+ cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));
- channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
- if (!channel_mem)
- goto err_channel;
-
- tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
- GFP_KERNEL);
- if (!tx_ring)
- goto err_tx_ring;
+ /* Set the allocation node based on the returned CPU */
+ node = cpu_to_node(cpu);
- rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
- GFP_KERNEL);
- if (!rx_ring)
- goto err_rx_ring;
+ channel = xgbe_alloc_node(sizeof(*channel), node);
+ if (!channel)
+ goto err_mem;
+ pdata->channel[i] = channel;
- for (i = 0, channel = channel_mem; i < count; i++, channel++) {
snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
channel->pdata = pdata;
channel->queue_index = i;
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
(DMA_CH_INC * i);
+ channel->node = node;
+ cpumask_set_cpu(cpu, &channel->affinity_mask);
if (pdata->per_channel_irq)
channel->dma_irq = pdata->channel_irq[i];
if (i < pdata->tx_ring_count) {
- spin_lock_init(&tx_ring->lock);
- channel->tx_ring = tx_ring++;
+ ring = xgbe_alloc_node(sizeof(*ring), node);
+ if (!ring)
+ goto err_mem;
+
+ spin_lock_init(&ring->lock);
+ ring->node = node;
+
+ channel->tx_ring = ring;
}
if (i < pdata->rx_ring_count) {
- spin_lock_init(&rx_ring->lock);
- channel->rx_ring = rx_ring++;
+ ring = xgbe_alloc_node(sizeof(*ring), node);
+ if (!ring)
+ goto err_mem;
+
+ spin_lock_init(&ring->lock);
+ ring->node = node;
+
+ channel->rx_ring = ring;
}
netif_dbg(pdata, drv, pdata->netdev,
+ "%s: cpu=%u, node=%d\n", channel->name, cpu, node);
+
+ netif_dbg(pdata, drv, pdata->netdev,
"%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
channel->name, channel->dma_regs, channel->dma_irq,
channel->tx_ring, channel->rx_ring);
}
- pdata->channel = channel_mem;
pdata->channel_count = count;
return 0;
-err_rx_ring:
- kfree(tx_ring);
-
-err_tx_ring:
- kfree(channel_mem);
-
-err_channel:
- return ret;
-}
-
-static void xgbe_free_channels(struct xgbe_prv_data *pdata)
-{
- if (!pdata->channel)
- return;
-
- kfree(pdata->channel->rx_ring);
- kfree(pdata->channel->tx_ring);
- kfree(pdata->channel);
+err_mem:
+ xgbe_free_channels(pdata);
- pdata->channel = NULL;
- pdata->channel_count = 0;
+ return -ENOMEM;
}
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
@@ -301,12 +326,10 @@ static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- xgbe_enable_rx_tx_int(pdata, channel);
+ for (i = 0; i < pdata->channel_count; i++)
+ xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
}
static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
@@ -329,12 +352,10 @@ static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- xgbe_disable_rx_tx_int(pdata, channel);
+ for (i = 0; i < pdata->channel_count; i++)
+ xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}
static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
@@ -382,9 +403,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
return false;
}
-static irqreturn_t xgbe_ecc_isr(int irq, void *data)
+static void xgbe_ecc_isr_task(unsigned long data)
{
- struct xgbe_prv_data *pdata = data;
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
unsigned int ecc_isr;
bool stop = false;
@@ -435,12 +456,26 @@ out:
/* Clear all ECC interrupts */
XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
- return IRQ_HANDLED;
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
}
-static irqreturn_t xgbe_isr(int irq, void *data)
+static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_ecc);
+ else
+ xgbe_ecc_isr_task((unsigned long)pdata);
+
+ return IRQ_HANDLED;
+}
+
+static void xgbe_isr_task(unsigned long data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
@@ -461,7 +496,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (!(dma_isr & (1 << i)))
continue;
- channel = pdata->channel + i;
+ channel = pdata->channel[i];
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
@@ -543,15 +578,36 @@ static irqreturn_t xgbe_isr(int irq, void *data)
isr_done:
/* If there is not a separate AN irq, handle it here */
if (pdata->dev_irq == pdata->an_irq)
- pdata->phy_if.an_isr(irq, pdata);
+ pdata->phy_if.an_isr(pdata);
/* If there is not a separate ECC irq, handle it here */
if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
- xgbe_ecc_isr(irq, pdata);
+ xgbe_ecc_isr_task((unsigned long)pdata);
/* If there is not a separate I2C irq, handle it here */
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
- pdata->i2c_if.i2c_isr(irq, pdata);
+ pdata->i2c_if.i2c_isr(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support) {
+ unsigned int reissue_mask;
+
+ reissue_mask = 1 << 0;
+ if (!pdata->per_channel_irq)
+ reissue_mask |= 0xffff << 4;
+
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
+ }
+}
+
+static irqreturn_t xgbe_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_dev);
+ else
+ xgbe_isr_task((unsigned long)pdata);
return IRQ_HANDLED;
}
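A minimal model of the hard-IRQ/tasklet split introduced above: the registered handler either defers the real work or runs it inline, depending on isr_as_tasklet. Plain function pointers stand in for tasklet_schedule() in this sketch.

#include <stdio.h>
#include <stdbool.h>

struct fake_pdata {
        bool isr_as_tasklet;
        void (*schedule_tasklet)(unsigned long data);   /* stand-in for tasklet_schedule */
};

static void isr_task(unsigned long data)
{
        /* the real work: read/clear status, schedule NAPI, reissue interrupts */
        printf("servicing device %#lx\n", data);
}

static void isr(struct fake_pdata *pdata)
{
        if (pdata->isr_as_tasklet)
                pdata->schedule_tasklet((unsigned long)pdata);  /* defer out of IRQ context */
        else
                isr_task((unsigned long)pdata);                 /* run inline, as before */
}

int main(void)
{
        struct fake_pdata pdata = { .isr_as_tasklet = false };

        isr(&pdata);
        return 0;
}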
@@ -640,8 +696,8 @@ static void xgbe_init_timers(struct xgbe_prv_data *pdata)
setup_timer(&pdata->service_timer, xgbe_service_timer,
(unsigned long)pdata);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
if (!channel->tx_ring)
break;
@@ -662,8 +718,8 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
del_timer_sync(&pdata->service_timer);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
if (!channel->tx_ring)
break;
@@ -781,8 +837,8 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
unsigned int i;
if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
xgbe_one_poll, NAPI_POLL_WEIGHT);
@@ -804,8 +860,8 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
unsigned int i;
if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
napi_disable(&channel->napi);
if (del)
@@ -826,6 +882,10 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
+ tasklet_init(&pdata->tasklet_dev, xgbe_isr_task, (unsigned long)pdata);
+ tasklet_init(&pdata->tasklet_ecc, xgbe_ecc_isr_task,
+ (unsigned long)pdata);
+
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev->name, pdata);
if (ret) {
@@ -847,8 +907,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
if (!pdata->per_channel_irq)
return 0;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
snprintf(channel->dma_irq_name,
sizeof(channel->dma_irq_name) - 1,
"%s-TxRx-%u", netdev_name(netdev),
@@ -862,14 +922,21 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
channel->dma_irq);
goto err_dma_irq;
}
+
+ irq_set_affinity_hint(channel->dma_irq,
+ &channel->affinity_mask);
}
return 0;
err_dma_irq:
/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
- for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ for (i--; i < pdata->channel_count; i--) {
+ channel = pdata->channel[i];
+
+ irq_set_affinity_hint(channel->dma_irq, NULL);
devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
@@ -893,9 +960,12 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
if (!pdata->per_channel_irq)
return;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+
+ irq_set_affinity_hint(channel->dma_irq, NULL);
devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -930,16 +1000,14 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
struct xgbe_ring *ring;
struct xgbe_ring_data *rdata;
unsigned int i, j;
DBGPR("-->xgbe_free_tx_data\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- ring = channel->tx_ring;
+ for (i = 0; i < pdata->channel_count; i++) {
+ ring = pdata->channel[i]->tx_ring;
if (!ring)
break;
@@ -955,16 +1023,14 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
struct xgbe_ring *ring;
struct xgbe_ring_data *rdata;
unsigned int i, j;
DBGPR("-->xgbe_free_rx_data\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- ring = channel->rx_ring;
+ for (i = 0; i < pdata->channel_count; i++) {
+ ring = pdata->channel[i]->rx_ring;
if (!ring)
break;
@@ -1140,8 +1206,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
hw_if->exit(pdata);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
if (!channel->tx_ring)
continue;
@@ -1212,6 +1278,10 @@ static void xgbe_tx_tstamp(struct work_struct *work)
u64 nsec;
unsigned long flags;
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (!pdata->tx_tstamp_skb)
+ goto unlock;
+
if (pdata->tx_tstamp) {
nsec = timecounter_cyc2time(&pdata->tstamp_tc,
pdata->tx_tstamp);
@@ -1223,8 +1293,9 @@ static void xgbe_tx_tstamp(struct work_struct *work)
dev_kfree_skb_any(pdata->tx_tstamp_skb);
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
pdata->tx_tstamp_skb = NULL;
+
+unlock:
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}
@@ -1623,7 +1694,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
- channel = pdata->channel + skb->queue_mapping;
+ channel = pdata->channel[skb->queue_mapping];
txq = netdev_get_tx_queue(netdev, channel->queue_index);
ring = channel->tx_ring;
packet = &ring->packet_data;
@@ -1833,9 +1904,10 @@ static void xgbe_poll_controller(struct net_device *netdev)
DBGPR("-->xgbe_poll_controller\n");
if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
xgbe_dma_isr(channel->dma_irq, channel);
+ }
} else {
disable_irq(pdata->dev_irq);
xgbe_isr(pdata->dev_irq, pdata);
@@ -2328,8 +2400,9 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
do {
last_processed = processed;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+
/* Cleanup Tx ring first */
xgbe_tx_poll(channel);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 920566a3a599..67a2e52ad25d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -247,7 +247,7 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
netdev_err(netdev,
- "autoneg disabled, pause autoneg not avialable\n");
+ "autoneg disabled, pause autoneg not available\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 417bdb5982a9..4d9062d35930 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -274,13 +274,16 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
}
-static irqreturn_t xgbe_i2c_isr(int irq, void *data)
+static void xgbe_i2c_isr_task(unsigned long data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
unsigned int isr;
isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
+ if (!isr)
+ goto reissue_check;
+
netif_dbg(pdata, intr, pdata->netdev,
"I2C interrupt received: status=%#010x\n", isr);
@@ -308,6 +311,21 @@ out:
if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
complete(&pdata->i2c_complete);
+reissue_check:
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 2);
+}
+
+static irqreturn_t xgbe_i2c_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_i2c);
+ else
+ xgbe_i2c_isr_task((unsigned long)pdata);
+
return IRQ_HANDLED;
}
@@ -349,12 +367,11 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
XI2C_IOWRITE(pdata, IC_TAR, addr);
}
-static irqreturn_t xgbe_i2c_combined_isr(int irq, struct xgbe_prv_data *pdata)
+static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
{
- if (!XI2C_IOREAD(pdata, IC_RAW_INTR_STAT))
- return IRQ_HANDLED;
+ xgbe_i2c_isr_task((unsigned long)pdata);
- return xgbe_i2c_isr(irq, pdata);
+ return IRQ_HANDLED;
}
static int xgbe_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *op)
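
The I2C interrupt handling above is split into a hard-IRQ entry point and a tasklet body: when isr_as_tasklet is set (MSI/MSI-X, per the xgbe-pci.c hunks later in this diff) the handler only schedules tasklet_i2c and the register I/O runs in softirq context, while a shared legacy interrupt invokes the body directly, as the combined ISR now does. The generic shape of that deferral, as a sketch with hypothetical names:

/* Sketch; foo_dev, foo_read_and_clear_status and friends are hypothetical */
static void foo_isr_task(unsigned long data)
{
	struct foo_dev *fd = (struct foo_dev *)data;

	foo_read_and_clear_status(fd);	/* device I/O happens in softirq */
}

static irqreturn_t foo_isr(int irq, void *data)
{
	struct foo_dev *fd = data;

	if (fd->isr_as_tasklet)
		tasklet_schedule(&fd->tasklet);
	else
		foo_isr_task((unsigned long)fd);

	return IRQ_HANDLED;
}
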
@@ -445,6 +462,9 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
/* If we have a separate I2C irq, enable it */
if (pdata->dev_irq != pdata->i2c_irq) {
+ tasklet_init(&pdata->tasklet_i2c, xgbe_i2c_isr_task,
+ (unsigned long)pdata);
+
ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
xgbe_i2c_isr, 0, pdata->i2c_name,
pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 17ac8f9a51a0..500147d9e3c8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -140,14 +140,16 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
- pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->blen = DMA_SBMR_BLEN_64;
+ pdata->pbl = DMA_PBL_128;
+ pdata->aal = 1;
+ pdata->rd_osr_limit = 8;
+ pdata->wr_osr_limit = 8;
pdata->tx_sf_mode = MTL_TSF_ENABLE;
pdata->tx_threshold = MTL_TX_THRESHOLD_64;
- pdata->tx_pbl = DMA_PBL_16;
pdata->tx_osp_mode = DMA_OSP_ENABLE;
pdata->rx_sf_mode = MTL_RSF_DISABLE;
pdata->rx_threshold = MTL_RX_THRESHOLD_64;
- pdata->rx_pbl = DMA_PBL_16;
pdata->pause_autoneg = 1;
pdata->tx_pause = 1;
pdata->rx_pause = 1;
@@ -277,7 +279,11 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
pdata->desc_ded_period = jiffies;
/* Issue software reset to device */
- pdata->hw_if.exit(pdata);
+ ret = pdata->hw_if.exit(pdata);
+ if (ret) {
+ dev_err(dev, "software reset failed\n");
+ return ret;
+ }
/* Set default configuration data */
xgbe_default_config(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index b672d9249539..80684914dd8a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -665,6 +665,10 @@ static void xgbe_an37_isr(struct xgbe_prv_data *pdata)
} else {
/* Enable AN interrupts */
xgbe_an37_enable_interrupts(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
}
}
@@ -684,10 +688,14 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
} else {
/* Enable AN interrupts */
xgbe_an73_enable_interrupts(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
}
}
-static irqreturn_t xgbe_an_isr(int irq, void *data)
+static void xgbe_an_isr_task(unsigned long data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
@@ -705,13 +713,25 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
default:
break;
}
+}
+
+static irqreturn_t xgbe_an_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_an);
+ else
+ xgbe_an_isr_task((unsigned long)pdata);
return IRQ_HANDLED;
}
-static irqreturn_t xgbe_an_combined_isr(int irq, struct xgbe_prv_data *pdata)
+static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
{
- return xgbe_an_isr(irq, pdata);
+ xgbe_an_isr_task((unsigned long)pdata);
+
+ return IRQ_HANDLED;
}
static void xgbe_an_irq_work(struct work_struct *work)
@@ -915,6 +935,10 @@ static void xgbe_an_state_machine(struct work_struct *work)
break;
}
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
+
mutex_unlock(&pdata->an_mutex);
}
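
The guarded XP_INT_REISSUE_EN write now appears at the end of the I2C task (bit 2) and at three AN sites (bit 3): it asks the hardware to re-assert the interrupt if status is still pending once the handler finishes, so nothing is lost when new status arrives while the tasklet runs. A hypothetical helper that would consolidate the repeated pair:

/* Hypothetical consolidation of the repeated guard + write above */
static void xgbe_irq_reissue(struct xgbe_prv_data *pdata, unsigned int bit)
{
	if (pdata->vdata->irq_reissue_support)
		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << bit);
}
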
@@ -1379,6 +1403,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* If we have a separate AN irq, enable it */
if (pdata->dev_irq != pdata->an_irq) {
+ tasklet_init(&pdata->tasklet_an, xgbe_an_isr_task,
+ (unsigned long)pdata);
+
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 38392a520725..1e56ad7bd9a5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -139,6 +139,7 @@ static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
return ret;
}
+ pdata->isr_as_tasklet = 1;
pdata->irq_count = ret;
pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
@@ -175,6 +176,7 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
return ret;
}
+ pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
pdata->irq_count = 1;
pdata->channel_irq_count = 1;
@@ -325,9 +327,9 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the DMA coherency values */
pdata->coherent = 1;
- pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
- pdata->arcache = XGBE_DMA_OS_ARCACHE;
- pdata->awcache = XGBE_DMA_OS_AWCACHE;
+ pdata->arcr = XGBE_DMA_PCI_ARCR;
+ pdata->awcr = XGBE_DMA_PCI_AWCR;
+ pdata->awarcr = XGBE_DMA_PCI_AWARCR;
/* Set the maximum channels and queues */
reg = XP_IOREAD(pdata, XP_PROP_1);
@@ -445,6 +447,9 @@ static const struct xgbe_version_data xgbe_v2a = {
.tx_tstamp_workaround = 1,
.ecc_support = 1,
.i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
};
static const struct xgbe_version_data xgbe_v2b = {
@@ -456,6 +461,9 @@ static const struct xgbe_version_data xgbe_v2b = {
.tx_tstamp_workaround = 1,
.ecc_support = 1,
.i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
};
static const struct pci_device_id xgbe_pci_table[] = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index e707c49cc55a..04b5c149caca 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -711,23 +711,39 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
+ if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed)
+ return;
+
+ pdata->phy.supported &= ~SUPPORTED_Autoneg;
+ pdata->phy.supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ pdata->phy.supported &= ~SUPPORTED_TP;
+ pdata->phy.supported &= ~SUPPORTED_FIBRE;
+ pdata->phy.supported &= ~SUPPORTED_100baseT_Full;
+ pdata->phy.supported &= ~SUPPORTED_1000baseT_Full;
+ pdata->phy.supported &= ~SUPPORTED_10000baseT_Full;
+
if (phy_data->sfp_mod_absent) {
pdata->phy.speed = SPEED_UNKNOWN;
pdata->phy.duplex = DUPLEX_UNKNOWN;
pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.pause_autoneg = AUTONEG_ENABLE;
+
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+
pdata->phy.advertising = pdata->phy.supported;
return;
}
- pdata->phy.advertising &= ~ADVERTISED_Autoneg;
- pdata->phy.advertising &= ~ADVERTISED_TP;
- pdata->phy.advertising &= ~ADVERTISED_FIBRE;
- pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
- pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
- pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
- pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
-
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
case XGBE_SFP_BASE_1000_SX:
@@ -736,17 +752,25 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
pdata->phy.speed = SPEED_UNKNOWN;
pdata->phy.duplex = DUPLEX_UNKNOWN;
pdata->phy.autoneg = AUTONEG_ENABLE;
- pdata->phy.advertising |= ADVERTISED_Autoneg;
+ pdata->phy.pause_autoneg = AUTONEG_ENABLE;
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
break;
case XGBE_SFP_BASE_10000_SR:
case XGBE_SFP_BASE_10000_LR:
case XGBE_SFP_BASE_10000_LRM:
case XGBE_SFP_BASE_10000_ER:
case XGBE_SFP_BASE_10000_CR:
- default:
pdata->phy.speed = SPEED_10000;
pdata->phy.duplex = DUPLEX_FULL;
pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.pause_autoneg = AUTONEG_DISABLE;
+ break;
+ default:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.pause_autoneg = AUTONEG_DISABLE;
break;
}
@@ -754,36 +778,38 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
case XGBE_SFP_BASE_1000_T:
case XGBE_SFP_BASE_1000_CX:
case XGBE_SFP_BASE_10000_CR:
- pdata->phy.advertising |= ADVERTISED_TP;
+ pdata->phy.supported |= SUPPORTED_TP;
break;
default:
- pdata->phy.advertising |= ADVERTISED_FIBRE;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
}
switch (phy_data->sfp_speed) {
case XGBE_SFP_SPEED_100_1000:
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
- pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
- pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
break;
case XGBE_SFP_SPEED_1000:
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
- pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
break;
case XGBE_SFP_SPEED_10000:
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
- pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
break;
default:
/* Choose the fastest supported speed */
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
- pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
- pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
- pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
}
+
+ pdata->phy.advertising = pdata->phy.supported;
}
static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
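
The SFP settings rework changes strategy: rather than masking individual ADVERTISED_* bits out of phy.advertising, it clears the relevant SUPPORTED_* bits up front, rebuilds phy.supported from the inserted module's type and speed plus the port's configured speeds, and finally mirrors the result into phy.advertising. Distilled (a sketch; the per-module cases are in the hunk above):

static void xgbe_phy_sfp_rebuild_sketch(struct xgbe_prv_data *pdata)
{
	/* start from a clean slate for the bits this function owns */
	pdata->phy.supported &= ~(SUPPORTED_Autoneg | SUPPORTED_Pause |
				  SUPPORTED_Asym_Pause | SUPPORTED_TP |
				  SUPPORTED_FIBRE | SUPPORTED_100baseT_Full |
				  SUPPORTED_1000baseT_Full |
				  SUPPORTED_10000baseT_Full);

	/* ... re-add bits for the detected module and port speeds ... */

	pdata->phy.advertising = pdata->phy.supported;
}
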
@@ -1095,7 +1121,8 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
ret = xgbe_phy_sfp_get_mux(pdata);
if (ret) {
- netdev_err(pdata->netdev, "I2C error setting SFP MUX\n");
+ dev_err_once(pdata->dev, "%s: I2C error setting SFP MUX\n",
+ netdev_name(pdata->netdev));
return ret;
}
@@ -1105,7 +1132,8 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
&eeprom_addr, sizeof(eeprom_addr),
&sfp_eeprom, sizeof(sfp_eeprom));
if (ret) {
- netdev_err(pdata->netdev, "I2C error reading SFP EEPROM\n");
+ dev_err_once(pdata->dev, "%s: I2C error reading SFP EEPROM\n",
+ netdev_name(pdata->netdev));
goto put;
}
@@ -1164,7 +1192,8 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
&gpio_reg, sizeof(gpio_reg),
gpio_ports, sizeof(gpio_ports));
if (ret) {
- netdev_err(pdata->netdev, "I2C error reading SFP GPIOs\n");
+ dev_err_once(pdata->dev, "%s: I2C error reading SFP GPIOs\n",
+ netdev_name(pdata->netdev));
return;
}
@@ -1694,19 +1723,25 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
xgbe_phy_put_comm_ownership(pdata);
}
-static void xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
+static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ unsigned int cmd, unsigned int sub_cmd)
{
- if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
- return;
+ unsigned int s0 = 0;
+ unsigned int wait;
/* Log if a previous command did not complete */
- netif_dbg(pdata, link, pdata->netdev,
- "firmware mailbox not ready for command\n");
-}
+ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ netif_dbg(pdata, link, pdata->netdev,
+ "firmware mailbox not ready for command\n");
-static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
-{
- unsigned int wait;
+ /* Construct the command */
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, sub_cmd);
+
+ /* Issue the command */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
/* Wait for command to complete */
wait = XGBE_RATECHANGE_COUNT;
@@ -1723,21 +1758,8 @@ static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
{
- unsigned int s0;
-
- xgbe_phy_start_ratechange(pdata);
-
/* Receiver Reset Cycle */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 5, 0);
netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");
}
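
Every mode function used to open-code the same firmware mailbox sequence: check for a busy mailbox, build the SCRATCH_0 word, clear SCRATCH_1, raise INT_REQ, and poll for completion. xgbe_phy_perform_ratechange() folds all of that into one call, and the remaining hunks in this file reduce to single-line invocations. The command encoding, collected from those hunks:

/* (cmd, sub_cmd) pairs as used below:
 *   0,0  power off          1,1  100M/SGMII       1,2  1G/SGMII
 *   1,3  1G/KX and 1G/X     2,0  2.5G/KX          4,0  10G/KR
 *   3,0..3  10G/SFI (sub_cmd selects the passive-cable length profile)
 *   5,0  receiver reset cycle
 */
xgbe_phy_perform_ratechange(pdata, 3, 2);	/* e.g. SFI, passive cable <= 3m */
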
@@ -1746,14 +1768,8 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- xgbe_phy_start_ratechange(pdata);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ /* Power off */
+ xgbe_phy_perform_ratechange(pdata, 0, 0);
phy_data->cur_mode = XGBE_MODE_UNKNOWN;
@@ -1763,33 +1779,21 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 10G/SFI */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3);
if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+ xgbe_phy_perform_ratechange(pdata, 3, 0);
} else {
if (phy_data->sfp_cable_len <= 1)
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
+ xgbe_phy_perform_ratechange(pdata, 3, 1);
else if (phy_data->sfp_cable_len <= 3)
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
+ xgbe_phy_perform_ratechange(pdata, 3, 2);
else
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+ xgbe_phy_perform_ratechange(pdata, 3, 3);
}
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
-
phy_data->cur_mode = XGBE_MODE_SFI;
netif_dbg(pdata, link, pdata->netdev, "10GbE SFI mode set\n");
@@ -1798,23 +1802,11 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 1G/X */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 1, 3);
phy_data->cur_mode = XGBE_MODE_X;
@@ -1824,23 +1816,11 @@ static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 1G/SGMII */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 1, 2);
phy_data->cur_mode = XGBE_MODE_SGMII_1000;
@@ -1850,23 +1830,11 @@ static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
- /* 1G/SGMII */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ /* 100M/SGMII */
+ xgbe_phy_perform_ratechange(pdata, 1, 1);
phy_data->cur_mode = XGBE_MODE_SGMII_100;
@@ -1876,23 +1844,11 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 10G/KR */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 4, 0);
phy_data->cur_mode = XGBE_MODE_KR;
@@ -1902,23 +1858,11 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 2.5G/KX */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 2);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 2, 0);
phy_data->cur_mode = XGBE_MODE_KX_2500;
@@ -1928,23 +1872,11 @@ static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 1G/KX */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 1, 3);
phy_data->cur_mode = XGBE_MODE_KX_1000;
@@ -2037,6 +1969,8 @@ static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data,
return XGBE_MODE_SGMII_100;
case SPEED_1000:
return XGBE_MODE_SGMII_1000;
+ case SPEED_2500:
+ return XGBE_MODE_KX_2500;
case SPEED_10000:
return XGBE_MODE_KR;
default:
@@ -2180,6 +2114,9 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
case XGBE_MODE_SGMII_1000:
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_1000baseT_Full);
+ case XGBE_MODE_KX_2500:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_2500baseX_Full);
case XGBE_MODE_KR:
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_10000baseT_Full);
@@ -2210,6 +2147,8 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_1000baseT_Full);
case XGBE_MODE_SFI:
+ if (phy_data->sfp_mod_absent)
+ return true;
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_10000baseT_Full);
default:
@@ -2287,6 +2226,8 @@ static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
case SPEED_100:
case SPEED_1000:
return true;
+ case SPEED_2500:
+ return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T);
case SPEED_10000:
return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
default:
@@ -3013,9 +2954,6 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
pdata->phy.supported |= SUPPORTED_10000baseT_Full;
phy_data->start_mode = XGBE_MODE_SFI;
- if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
- pdata->phy.supported |=
- SUPPORTED_10000baseR_FEC;
}
phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 84d4c51cab8c..d0f3dfb88202 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -448,13 +448,11 @@ static int xgbe_platform_probe(struct platform_device *pdev)
}
pdata->coherent = (attr == DEV_DMA_COHERENT);
if (pdata->coherent) {
- pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
- pdata->arcache = XGBE_DMA_OS_ARCACHE;
- pdata->awcache = XGBE_DMA_OS_AWCACHE;
+ pdata->arcr = XGBE_DMA_OS_ARCR;
+ pdata->awcr = XGBE_DMA_OS_AWCR;
} else {
- pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
- pdata->arcache = XGBE_DMA_SYS_ARCACHE;
- pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+ pdata->arcr = XGBE_DMA_SYS_ARCR;
+ pdata->awcr = XGBE_DMA_SYS_AWCR;
}
/* Set the maximum fifo amounts */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index a533a6cc2d53..d06d260cf1e2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -267,7 +267,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
ktime_to_ns(ktime_get_real()));
/* Disable all timestamping to start */
- XGMAC_IOWRITE(pdata, MAC_TCR, 0);
+ XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f9a24639f574..0938294f640a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -128,6 +128,7 @@
#include <linux/net_tstamp.h>
#include <net/dcbnl.h>
#include <linux/completion.h>
+#include <linux/cpumask.h>
#define XGBE_DRV_NAME "amd-xgbe"
#define XGBE_DRV_VERSION "1.0.3"
@@ -163,14 +164,17 @@
#define XGBE_DMA_STOP_TIMEOUT 1
/* DMA cache settings - Outer sharable, write-back, write-allocate */
-#define XGBE_DMA_OS_AXDOMAIN 0x2
-#define XGBE_DMA_OS_ARCACHE 0xb
-#define XGBE_DMA_OS_AWCACHE 0xf
+#define XGBE_DMA_OS_ARCR 0x002b2b2b
+#define XGBE_DMA_OS_AWCR 0x2f2f2f2f
/* DMA cache settings - System, no caches used */
-#define XGBE_DMA_SYS_AXDOMAIN 0x3
-#define XGBE_DMA_SYS_ARCACHE 0x0
-#define XGBE_DMA_SYS_AWCACHE 0x0
+#define XGBE_DMA_SYS_ARCR 0x00303030
+#define XGBE_DMA_SYS_AWCR 0x30303030
+
+/* DMA cache settings - PCI device */
+#define XGBE_DMA_PCI_ARCR 0x00000003
+#define XGBE_DMA_PCI_AWCR 0x13131313
+#define XGBE_DMA_PCI_AWARCR 0x00000313
/* DMA channel interrupt modes */
#define XGBE_IRQ_MODE_EDGE 0
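
The three per-field values (axdomain/arcache/awcache) become full 32-bit register images: each byte of the ARCR/AWCR images carries the domain and cache attributes for one class of AXI read or write, and the PCI variant adds a third image (awarcr) for the combined write/read attribute register. The programming side is presumably a straight write of each image (an assumption; the register names follow the xgbe DMA_AXI* convention and the writer is outside this excerpt):

/* Assumed programming path: write each image whole instead of assembling
 * it field-by-field.
 */
XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
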
@@ -412,6 +416,7 @@ struct xgbe_ring {
/* Page allocation for RX buffers */
struct xgbe_page_alloc rx_hdr_pa;
struct xgbe_page_alloc rx_buf_pa;
+ int node;
/* Ring index values
* cur - Tx: index of descriptor to be used for current transfer
@@ -462,6 +467,9 @@ struct xgbe_channel {
struct xgbe_ring *tx_ring;
struct xgbe_ring *rx_ring;
+
+ int node;
+ cpumask_t affinity_mask;
} ____cacheline_aligned;
enum xgbe_state {
@@ -734,13 +742,6 @@ struct xgbe_hw_if {
/* For TX DMA Operate on Second Frame config */
int (*config_osp_mode)(struct xgbe_prv_data *);
- /* For RX and TX PBL config */
- int (*config_rx_pbl_val)(struct xgbe_prv_data *);
- int (*get_rx_pbl_val)(struct xgbe_prv_data *);
- int (*config_tx_pbl_val)(struct xgbe_prv_data *);
- int (*get_tx_pbl_val)(struct xgbe_prv_data *);
- int (*config_pblx8)(struct xgbe_prv_data *);
-
/* For MMC statistics */
void (*rx_mmc_int)(struct xgbe_prv_data *);
void (*tx_mmc_int)(struct xgbe_prv_data *);
@@ -837,7 +838,7 @@ struct xgbe_phy_if {
bool (*phy_valid_speed)(struct xgbe_prv_data *, int);
/* For single interrupt support */
- irqreturn_t (*an_isr)(int, struct xgbe_prv_data *);
+ irqreturn_t (*an_isr)(struct xgbe_prv_data *);
/* PHY implementation specific services */
struct xgbe_phy_impl_if phy_impl;
@@ -855,7 +856,7 @@ struct xgbe_i2c_if {
int (*i2c_xfer)(struct xgbe_prv_data *, struct xgbe_i2c_op *);
/* For single interrupt support */
- irqreturn_t (*i2c_isr)(int, struct xgbe_prv_data *);
+ irqreturn_t (*i2c_isr)(struct xgbe_prv_data *);
};
struct xgbe_desc_if {
@@ -924,6 +925,9 @@ struct xgbe_version_data {
unsigned int tx_tstamp_workaround;
unsigned int ecc_support;
unsigned int i2c_support;
+ unsigned int irq_reissue_support;
+ unsigned int tx_desc_prefetch;
+ unsigned int rx_desc_prefetch;
};
struct xgbe_prv_data {
@@ -1001,9 +1005,9 @@ struct xgbe_prv_data {
/* AXI DMA settings */
unsigned int coherent;
- unsigned int axdomain;
- unsigned int arcache;
- unsigned int awcache;
+ unsigned int arcr;
+ unsigned int awcr;
+ unsigned int awarcr;
/* Service routine support */
struct workqueue_struct *dev_workqueue;
@@ -1011,7 +1015,7 @@ struct xgbe_prv_data {
struct timer_list service_timer;
/* Rings for Tx/Rx on a DMA channel */
- struct xgbe_channel *channel;
+ struct xgbe_channel *channel[XGBE_MAX_DMA_CHANNELS];
unsigned int tx_max_channel_count;
unsigned int rx_max_channel_count;
unsigned int channel_count;
@@ -1026,19 +1030,21 @@ struct xgbe_prv_data {
unsigned int rx_q_count;
/* Tx/Rx common settings */
- unsigned int pblx8;
+ unsigned int blen;
+ unsigned int pbl;
+ unsigned int aal;
+ unsigned int rd_osr_limit;
+ unsigned int wr_osr_limit;
/* Tx settings */
unsigned int tx_sf_mode;
unsigned int tx_threshold;
- unsigned int tx_pbl;
unsigned int tx_osp_mode;
unsigned int tx_max_fifo_size;
/* Rx settings */
unsigned int rx_sf_mode;
unsigned int rx_threshold;
- unsigned int rx_pbl;
unsigned int rx_max_fifo_size;
/* Tx coalescing settings */
@@ -1159,6 +1165,12 @@ struct xgbe_prv_data {
unsigned int lpm_ctrl; /* CTRL1 for resume */
+ unsigned int isr_as_tasklet;
+ struct tasklet_struct tasklet_dev;
+ struct tasklet_struct tasklet_ecc;
+ struct tasklet_struct tasklet_i2c;
+ struct tasklet_struct tasklet_an;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *xgbe_debugfs;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 7e913d8331c3..8c9986f3fc01 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2252,7 +2252,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
netif_info(adapter, tx_done, adapter->netdev,
- "tx-skb droppted due to dma error\n");
+ "tx-skb dropped due to dma error\n");
/* roll back tpd/buffer */
atl1c_tx_rollback(adapter, tpd, type);
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 14c236e5bdb1..c12b4d3e946e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12729,7 +12729,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
} else {
/* If no mc addresses are required, flush the configuration */
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
- if (rc)
+ if (rc < 0)
BNX2X_ERR("Failed to clear multicast configuration %d\n",
rc);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 11e8a866a312..a19f68f5862d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1311,10 +1311,11 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
cp_cons = NEXT_CMP(cp_cons);
}
- if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+ if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
- netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
- agg_bufs, (int)MAX_SKB_FRAGS);
+ if (agg_bufs > MAX_SKB_FRAGS)
+ netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+ agg_bufs, (int)MAX_SKB_FRAGS);
return NULL;
}
@@ -1573,6 +1574,45 @@ next_rx_no_prod:
return rc;
}
+/* In netpoll mode, if we are using a combined completion ring, we need to
+ * discard the rx packets and recycle the buffers.
+ */
+static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u32 *raw_cons, u8 *event)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 tmp_raw_cons = *raw_cons;
+ struct rx_cmp_ext *rxcmp1;
+ struct rx_cmp *rxcmp;
+ u16 cp_cons;
+ u8 cmp_type;
+
+ cp_cons = RING_CMP(tmp_raw_cons);
+ rxcmp = (struct rx_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+ return -EBUSY;
+
+ cmp_type = RX_CMP_TYPE(rxcmp);
+ if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+ cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ struct rx_tpa_end_cmp_ext *tpa_end1;
+
+ tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
+ tpa_end1->rx_tpa_end_cmp_errors_v2 |=
+ cpu_to_le32(RX_TPA_END_CMP_ERRORS);
+ }
+ return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
+}
+
#define BNXT_GET_EVENT_PORT(data) \
((data) & \
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
@@ -1755,7 +1795,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (unlikely(tx_pkts > bp->tx_wake_thresh))
rx_pkts = budget;
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+ if (likely(budget))
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+ else
+ rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
+ &event);
if (likely(rc >= 0))
rx_pkts += rc;
else if (rc == -EBUSY) /* partial completion */
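
With a combined completion ring, netpoll (which invokes the NAPI handler with budget == 0) cannot simply skip RX completions, since they share the ring with TX completions. bnxt_force_rx_discard() therefore stamps an error bit into the completion (a CRC error for plain L2, the TPA error mask for TPA-end) so bnxt_rx_pkt() takes its normal error path and recycles the buffers instead of building an skb. The dispatch, distilled from the hunk above:

/* budget == 0 identifies the netpoll path */
if (likely(budget))
	rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
else
	rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons, &event);
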
@@ -6730,12 +6774,11 @@ static void bnxt_poll_controller(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
int i;
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_irq *irq = &bp->irq_tbl[i];
+ /* Only process tx rings/combined rings in netpoll mode. */
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- disable_irq(irq->vector);
- irq->handler(irq->vector, bp->bnapi[i]);
- enable_irq(irq->vector);
+ napi_schedule(&txr->bnapi->napi);
}
}
#endif
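
bnxt_poll_controller() previously disabled each vector and called the hard-IRQ handler inline; it now simply schedules NAPI on the TX (or combined) rings and lets the normal poll loop, together with the budget == 0 discard logic above, drain the completions. One likely motivation (an inference, not stated in the diff) is that disable_irq() can sleep, which is unsafe in netpoll context. Sketch of the adopted pattern:

/* Sketch of the adopted netpoll pattern: kick NAPI, don't call the ISR */
static void bnxt_poll_controller_sketch(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->tx_nr_rings; i++)
		napi_schedule(&bp->tx_ring[i].bnapi->napi);
}
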
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5984423499e6..f872a7db2ca8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -374,12 +374,16 @@ struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_errors_v2;
#define RX_TPA_END_CMP_V2 (0x1 << 0)
- #define RX_TPA_END_CMP_ERRORS (0x7fff << 1)
+ #define RX_TPA_END_CMP_ERRORS (0x3 << 1)
#define RX_TPA_END_CMPL_ERRORS_SHIFT 1
u32 rx_tpa_end_cmp_start_opaque;
};
+#define TPA_END_ERRORS(rx_tpa_end_ext) \
+ ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
+ cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 285676f8da6b..071fcbd14e6a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -251,11 +251,8 @@ int bcmgenet_mii_config(struct net_device *dev)
priv->ext_phy = !priv->internal_phy &&
(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
- if (priv->internal_phy)
- priv->phy_interface = PHY_INTERFACE_MODE_NA;
-
switch (priv->phy_interface) {
- case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_INTERNAL:
case PHY_INTERFACE_MODE_MOCA:
/* Irrespective of the actually configured PHY speed (100 or
* 1000) GENETv4 only has an internal GPHY so we will just end
@@ -471,7 +468,6 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
- const char *phy_mode_str = NULL;
struct phy_device *phydev = NULL;
char *compat;
int phy_mode;
@@ -510,23 +506,19 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
/* Get the link mode */
phy_mode = of_get_phy_mode(dn);
+ if (phy_mode < 0) {
+ dev_err(kdev, "invalid PHY mode property\n");
+ return phy_mode;
+ }
+
priv->phy_interface = phy_mode;
/* We need to specifically look up whether this PHY interface is internal
* or not *before* we even try to probe the PHY driver over MDIO as we
* may have shut down the internal PHY for power saving purposes.
*/
- if (phy_mode < 0) {
- ret = of_property_read_string(dn, "phy-mode", &phy_mode_str);
- if (ret < 0) {
- dev_err(kdev, "invalid PHY mode property\n");
- return ret;
- }
-
- priv->phy_interface = PHY_INTERFACE_MODE_NA;
- if (!strcasecmp(phy_mode_str, "internal"))
- priv->internal_phy = true;
- }
+ if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ priv->internal_phy = true;
/* Make sure we initialize MoCA PHYs with a link down */
if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
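
of_get_phy_mode() can now report phy-mode = "internal" directly as PHY_INTERFACE_MODE_INTERNAL, so the string-compare fallback is dropped and an unparseable property becomes a hard probe error instead of being papered over with PHY_INTERFACE_MODE_NA. The resulting flow, distilled from the two hunks above:

phy_mode = of_get_phy_mode(dn);
if (phy_mode < 0) {
	dev_err(kdev, "invalid PHY mode property\n");
	return phy_mode;
}
priv->phy_interface = phy_mode;
if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
	priv->internal_phy = true;
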
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 608bea171956..427d65a1a126 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -29,7 +29,15 @@ config MACB
support for the MACB/GEM chip.
To compile this driver as a module, choose M here: the module
- will be called macb.
+ will be macb.
+
+config MACB_USE_HWSTAMP
+ bool "Use IEEE 1588 hwstamp"
+ depends on MACB
+ default y
+ imply PTP_1588_CLOCK
+ ---help---
+ Enable IEEE 1588 Precision Time Protocol (PTP) support for MACB.
config MACB_PCI
tristate "Cadence PCI MACB/GEM support"
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
index 4ba75594d5c5..1d66ddb68969 100644
--- a/drivers/net/ethernet/cadence/Makefile
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -1,6 +1,11 @@
#
# Makefile for the Atmel network device drivers.
#
+macb-y := macb_main.o
+
+ifeq ($(CONFIG_MACB_USE_HWSTAMP),y)
+macb-y += macb_ptp.o
+endif
obj-$(CONFIG_MACB) += macb.o
obj-$(CONFIG_MACB_PCI) += macb_pci.o
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index ec037b0fa2a4..c93f3a2dc6c1 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -11,6 +11,12 @@
#define _MACB_H
#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
+#define MACB_EXT_DESC
+#endif
#define MACB_GREGS_NBR 16
#define MACB_GREGS_VERSION 2
@@ -86,6 +92,10 @@
#define GEM_SA3T 0x009C /* Specific3 Top */
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */
+#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
+#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
+#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
+#define GEM_PEFRSH 0x00f4 /* PTP Peer Event Frame Received Seconds Register 47:32 */
#define GEM_OTX 0x0100 /* Octets transmitted */
#define GEM_OCTTXL 0x0100 /* Octets transmitted [31:0] */
#define GEM_OCTTXH 0x0104 /* Octets transmitted [47:32] */
@@ -155,6 +165,9 @@
#define GEM_DCFG6 0x0294 /* Design Config 6 */
#define GEM_DCFG7 0x0298 /* Design Config 7 */
+#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
+#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
+
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
#define GEM_TBQPH(hw_q) (0x04C8)
@@ -191,6 +204,8 @@
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
#define MACB_SRTSM_OFFSET 15
+#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
+#define MACB_OSSMODE_SIZE 1
/* Bitfields in NCFGR */
#define MACB_SPD_OFFSET 0 /* Speed */
@@ -269,6 +284,10 @@
#define GEM_RXBS_SIZE 8
#define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */
#define GEM_DDRP_SIZE 1
+#define GEM_RXEXT_OFFSET 28 /* RX extended Buffer Descriptor mode */
+#define GEM_RXEXT_SIZE 1
+#define GEM_TXEXT_OFFSET 29 /* TX extended Buffer Descriptor mode */
+#define GEM_TXEXT_SIZE 1
#define GEM_ADDR64_OFFSET 30 /* Address bus width - 64b or 32b */
#define GEM_ADDR64_SIZE 1
@@ -425,6 +444,11 @@
#define GEM_TX_PKT_BUFF_OFFSET 21
#define GEM_TX_PKT_BUFF_SIZE 1
+
+/* Bitfields in DCFG5. */
+#define GEM_TSU_OFFSET 8
+#define GEM_TSU_SIZE 1
+
/* Bitfields in DCFG6. */
#define GEM_PBUF_LSO_OFFSET 27
#define GEM_PBUF_LSO_SIZE 1
@@ -439,6 +463,52 @@
#define GEM_NSINCR_OFFSET 0
#define GEM_NSINCR_SIZE 8
+/* Bitfields in TSH */
+#define GEM_TSH_OFFSET 0 /* TSU timer value (s). MSB [47:32] of seconds timer count */
+#define GEM_TSH_SIZE 16
+
+/* Bitfields in TSL */
+#define GEM_TSL_OFFSET 0 /* TSU timer value (s). LSB [31:0] of seconds timer count */
+#define GEM_TSL_SIZE 32
+
+/* Bitfields in TN */
+#define GEM_TN_OFFSET 0 /* TSU timer value (ns) */
+#define GEM_TN_SIZE 30
+
+/* Bitfields in TXBDCTRL */
+#define GEM_TXTSMODE_OFFSET 4 /* TX Descriptor Timestamp Insertion mode */
+#define GEM_TXTSMODE_SIZE 2
+
+/* Bitfields in RXBDCTRL */
+#define GEM_RXTSMODE_OFFSET 4 /* RX Descriptor Timestamp Insertion mode */
+#define GEM_RXTSMODE_SIZE 2
+
+/* Transmit DMA buffer descriptor Word 1 */
+#define GEM_DMA_TXVALID_OFFSET 23 /* timestamp has been captured in the Buffer Descriptor */
+#define GEM_DMA_TXVALID_SIZE 1
+
+/* Receive DMA buffer descriptor Word 0 */
+#define GEM_DMA_RXVALID_OFFSET 2 /* indicates a valid timestamp in the Buffer Descriptor */
+#define GEM_DMA_RXVALID_SIZE 1
+
+/* DMA buffer descriptor Word 2 (32 bit addressing) or Word 4 (64 bit addressing) */
+#define GEM_DMA_SECL_OFFSET 30 /* Timestamp seconds[1:0] */
+#define GEM_DMA_SECL_SIZE 2
+#define GEM_DMA_NSEC_OFFSET 0 /* Timestamp nanosecs [29:0] */
+#define GEM_DMA_NSEC_SIZE 30
+
+/* DMA buffer descriptor Word 3 (32 bit addressing) or Word 5 (64 bit addressing) */
+
+/* Newer hardware supports 12 bit timestamp precision in the DMA buffer
+ * descriptor; older hardware supports only 6 bits, which is still enough
+ * for PTP. The lower precision is always used rather than checking the
+ * hardware version.
+ */
+#define GEM_DMA_SECH_OFFSET 0 /* Timestamp seconds[5:2] */
+#define GEM_DMA_SECH_SIZE 4
+#define GEM_DMA_SEC_WIDTH (GEM_DMA_SECH_SIZE + GEM_DMA_SECL_SIZE)
+#define GEM_DMA_SEC_TOP (1 << GEM_DMA_SEC_WIDTH)
+#define GEM_DMA_SEC_MASK (GEM_DMA_SEC_TOP - 1)
+
/* Bitfields in ADJ */
#define GEM_ADDSUB_OFFSET 31
#define GEM_ADDSUB_SIZE 1
@@ -514,6 +584,8 @@
#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
+#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
+
/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
* and bitfields that are common across both devices, use macb_{read,write}l
@@ -546,16 +618,26 @@ struct macb_dma_desc {
u32 ctrl;
};
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-enum macb_hw_dma_cap {
- HW_DMA_CAP_32B,
- HW_DMA_CAP_64B,
-};
+#ifdef MACB_EXT_DESC
+#define HW_DMA_CAP_32B 0
+#define HW_DMA_CAP_64B (1 << 0)
+#define HW_DMA_CAP_PTP (1 << 1)
+#define HW_DMA_CAP_64B_PTP (HW_DMA_CAP_64B | HW_DMA_CAP_PTP)
struct macb_dma_desc_64 {
u32 addrh;
u32 resvd;
};
+
+struct macb_dma_desc_ptp {
+ u32 ts_1;
+ u32 ts_2;
+};
+
+struct gem_tx_ts {
+ struct sk_buff *skb;
+ struct macb_dma_desc_ptp desc_ptp;
+};
#endif
/* DMA descriptor bitfields */
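
The hardware DMA capability changes from an enum to ORable flags so that 64-bit addressing and PTP descriptors can be present independently or together, and every test becomes a bit test (& HW_DMA_CAP_64B) rather than an equality check. Sketch of how the flags combine (dma_64bit/ptp_capable are hypothetical local flags for illustration):

static u8 macb_dma_caps_sketch(bool dma_64bit, bool ptp_capable)
{
	u8 caps = HW_DMA_CAP_32B;		/* 0 */

	if (dma_64bit)
		caps |= HW_DMA_CAP_64B;
	if (ptp_capable)
		caps |= HW_DMA_CAP_PTP;		/* both => HW_DMA_CAP_64B_PTP */

	return caps;
}
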
@@ -871,6 +953,11 @@ struct macb_config {
int jumbo_max_len;
};
+struct tsu_incr {
+ u32 sub_ns;
+ u32 ns;
+};
+
struct macb_queue {
struct macb *bp;
int irq;
@@ -887,6 +974,12 @@ struct macb_queue {
struct macb_tx_skb *tx_skb;
dma_addr_t tx_ring_dma;
struct work_struct tx_error_task;
+
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ struct work_struct tx_ts_task;
+ unsigned int tx_ts_head, tx_ts_tail;
+ struct gem_tx_ts tx_timestamps[PTP_TS_BUFFER_SIZE];
+#endif
};
struct macb {
@@ -930,6 +1023,7 @@ struct macb {
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
+ struct device_node *phy_node;
int link;
int speed;
int duplex;
@@ -954,11 +1048,62 @@ struct macb {
u32 wol;
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- enum macb_hw_dma_cap hw_dma_cap;
+#ifdef MACB_EXT_DESC
+ uint8_t hw_dma_cap;
#endif
+ spinlock_t tsu_clk_lock; /* gem tsu clock locking */
+ unsigned int tsu_rate;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct tsu_incr tsu_incr;
+ struct hwtstamp_config tstamp_config;
};
+#ifdef CONFIG_MACB_USE_HWSTAMP
+#define GEM_TSEC_SIZE (GEM_TSH_SIZE + GEM_TSL_SIZE)
+#define TSU_SEC_MAX_VAL (((u64)1 << GEM_TSEC_SIZE) - 1)
+#define TSU_NSEC_MAX_VAL ((1 << GEM_TN_SIZE) - 1)
+
+enum macb_bd_control {
+ TSTAMP_DISABLED,
+ TSTAMP_FRAME_PTP_EVENT_ONLY,
+ TSTAMP_ALL_PTP_FRAMES,
+ TSTAMP_ALL_FRAMES,
+};
+
+void gem_ptp_init(struct net_device *ndev);
+void gem_ptp_remove(struct net_device *ndev);
+int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des);
+void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
+static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
+{
+ if (queue->bp->tstamp_config.tx_type == TSTAMP_DISABLED)
+ return -ENOTSUPP;
+
+ return gem_ptp_txstamp(queue, skb, desc);
+}
+
+static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
+{
+ if (bp->tstamp_config.rx_filter == TSTAMP_DISABLED)
+ return;
+
+ gem_ptp_rxstamp(bp, skb, desc);
+}
+int gem_get_hwtst(struct net_device *dev, struct ifreq *rq);
+int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd);
+#else
+static inline void gem_ptp_init(struct net_device *ndev) { }
+static inline void gem_ptp_remove(struct net_device *ndev) { }
+
+static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
+{
+ return -1;
+}
+
+static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
+#endif
+
static inline bool macb_is_gem(struct macb *bp)
{
return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb_main.c
index 4b0168bcbc8a..41e5711544fc 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -79,33 +79,84 @@
#define MACB_HALT_TIMEOUT 1230
/* DMA buffer descriptor might be different size
- * depends on hardware configuration.
+ * depends on hardware configuration:
+ *
+ * 1. dma address width 32 bits:
+ * word 1: 32 bit address of Data Buffer
+ * word 2: control
+ *
+ * 2. dma address width 64 bits:
+ * word 1: 32 bit address of Data Buffer
+ * word 2: control
+ * word 3: upper 32 bit address of Data Buffer
+ * word 4: unused
+ *
+ * 3. dma address width 32 bits with hardware timestamping:
+ * word 1: 32 bit address of Data Buffer
+ * word 2: control
+ * word 3: timestamp word 1
+ * word 4: timestamp word 2
+ *
+ * 4. dma address width 64 bits with hardware timestamping:
+ * word 1: 32 bit address of Data Buffer
+ * word 2: control
+ * word 3: upper 32 bit address of Data Buffer
+ * word 4: unused
+ * word 5: timestamp word 1
+ * word 6: timestamp word 2
*/
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
- return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#ifdef MACB_EXT_DESC
+ unsigned int desc_size;
+
+ switch (bp->hw_dma_cap) {
+ case HW_DMA_CAP_64B:
+ desc_size = sizeof(struct macb_dma_desc)
+ + sizeof(struct macb_dma_desc_64);
+ break;
+ case HW_DMA_CAP_PTP:
+ desc_size = sizeof(struct macb_dma_desc)
+ + sizeof(struct macb_dma_desc_ptp);
+ break;
+ case HW_DMA_CAP_64B_PTP:
+ desc_size = sizeof(struct macb_dma_desc)
+ + sizeof(struct macb_dma_desc_64)
+ + sizeof(struct macb_dma_desc_ptp);
+ break;
+ default:
+ desc_size = sizeof(struct macb_dma_desc);
+ }
+ return desc_size;
#endif
return sizeof(struct macb_dma_desc);
}
-static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- /* Dma buffer descriptor is 4 words length (instead of 2 words)
- * for 64b GEM.
- */
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
- idx <<= 1;
+#ifdef MACB_EXT_DESC
+ switch (bp->hw_dma_cap) {
+ case HW_DMA_CAP_64B:
+ case HW_DMA_CAP_PTP:
+ desc_idx <<= 1;
+ break;
+ case HW_DMA_CAP_64B_PTP:
+ desc_idx *= 3;
+ break;
+ default:
+ break;
+ }
+ return desc_idx;
#endif
- return idx;
+ return desc_idx;
}
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
- return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+ return NULL;
}
#endif
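
Worked example of the index scaling: in units of the 2-word base descriptor, a 64B-only or PTP-only descriptor is 4 words (2 slots, so the index doubles) and a 64B+PTP descriptor is 6 words (3 slots, so the index triples), matching macb_dma_desc_get_size() (8, 16, and 24 bytes respectively). A sketch tying the two helpers together:

static struct macb_dma_desc *macb_desc_at_sketch(struct macb *bp,
						 struct macb_dma_desc *ring,
						 unsigned int i)
{
	/* adjusted index * 8-byte base descriptor == byte offset of entry i */
	return &ring[macb_adj_dma_desc_idx(bp, i)];
}
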
@@ -425,32 +476,40 @@ static int macb_mii_probe(struct net_device *dev)
int phy_irq;
int ret;
- phydev = phy_find_first(bp->mii_bus);
- if (!phydev) {
- netdev_err(dev, "no PHY found\n");
- return -ENXIO;
- }
+ if (bp->phy_node) {
+ phydev = of_phy_connect(dev, bp->phy_node,
+ &macb_handle_link_change, 0,
+ bp->phy_interface);
+ if (!phydev)
+ return -ENODEV;
+ } else {
+ phydev = phy_find_first(bp->mii_bus);
+ if (!phydev) {
+ netdev_err(dev, "no PHY found\n");
+ return -ENXIO;
+ }
- pdata = dev_get_platdata(&bp->pdev->dev);
- if (pdata) {
- if (gpio_is_valid(pdata->phy_irq_pin)) {
- ret = devm_gpio_request(&bp->pdev->dev,
- pdata->phy_irq_pin, "phy int");
- if (!ret) {
- phy_irq = gpio_to_irq(pdata->phy_irq_pin);
- phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ pdata = dev_get_platdata(&bp->pdev->dev);
+ if (pdata) {
+ if (gpio_is_valid(pdata->phy_irq_pin)) {
+ ret = devm_gpio_request(&bp->pdev->dev,
+ pdata->phy_irq_pin, "phy int");
+ if (!ret) {
+ phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+ phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ }
+ } else {
+ phydev->irq = PHY_POLL;
}
- } else {
- phydev->irq = PHY_POLL;
}
- }
- /* attach the mac to the phy */
- ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
- bp->phy_interface);
- if (ret) {
- netdev_err(dev, "Could not attach to PHY\n");
- return ret;
+ /* attach the mac to the phy */
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
+ bp->phy_interface);
+ if (ret) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return ret;
+ }
}
/* mask with MAC supported features */
@@ -499,26 +558,37 @@ static int macb_mii_init(struct macb *bp)
np = bp->pdev->dev.of_node;
if (np) {
- /* try dt phy registration */
- err = of_mdiobus_register(bp->mii_bus, np);
+ if (of_phy_is_fixed_link(np)) {
+ if (of_phy_register_fixed_link(np) < 0) {
+ dev_err(&bp->pdev->dev,
+ "broken fixed-link specification\n");
+ goto err_out_unregister_bus;
+ }
+ bp->phy_node = of_node_get(np);
- /* fallback to standard phy registration if no phy were
- * found during dt phy registration
- */
- if (!err && !phy_find_first(bp->mii_bus)) {
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- struct phy_device *phydev;
-
- phydev = mdiobus_scan(bp->mii_bus, i);
- if (IS_ERR(phydev) &&
- PTR_ERR(phydev) != -ENODEV) {
- err = PTR_ERR(phydev);
- break;
+ err = mdiobus_register(bp->mii_bus);
+ } else {
+ /* try dt phy registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
+
+ /* fallback to standard phy registration if no phy were
+ * found during dt phy registration
+ */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev) &&
+ PTR_ERR(phydev) != -ENODEV) {
+ err = PTR_ERR(phydev);
+ break;
+ }
}
- }
- if (err)
- goto err_out_unregister_bus;
+ if (err)
+ goto err_out_unregister_bus;
+ }
}
} else {
for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -602,7 +672,7 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
struct macb_dma_desc_64 *desc_64;
- if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
desc_64->addrh = upper_32_bits(addr);
}
@@ -616,7 +686,7 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
struct macb_dma_desc_64 *desc_64;
- if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
addr = ((u64)(desc_64->addrh) << 32);
}
@@ -715,7 +785,7 @@ static void macb_tx_error_task(struct work_struct *work)
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
/* Make TX ring reflect state of hardware */
@@ -777,6 +847,12 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
+ if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
+ /* skb now belongs to timestamp buffer
+ * and will be removed later
+ */
+ tx_skb->skb = NULL;
+ }
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
@@ -943,6 +1019,8 @@ static int gem_rx(struct macb *bp, int budget)
bp->dev->stats.rx_packets++;
bp->dev->stats.rx_bytes += skb->len;
+ gem_ptp_do_rxstamp(bp, skb, desc);
+
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
@@ -1264,7 +1342,6 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(HRESP));
}
-
status = queue_readl(queue, ISR);
}
@@ -1594,7 +1671,6 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Make newly initialized descriptor visible to hardware */
wmb();
-
skb_tx_timestamp(skb);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
@@ -1923,9 +1999,13 @@ static void macb_configure_dma(struct macb *bp)
dmacfg &= ~GEM_BIT(TXCOEN);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
dmacfg |= GEM_BIT(ADDR64);
#endif
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
+#endif
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
dmacfg);
gem_writel(bp, DMACFG, dmacfg);
@@ -1973,13 +2053,13 @@ static void macb_init_hw(struct macb *bp)
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
#endif
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
@@ -2448,6 +2528,70 @@ static int macb_set_ringparam(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_MACB_USE_HWSTAMP
+static unsigned int gem_get_tsu_rate(struct macb *bp)
+{
+ struct clk *tsu_clk;
+ unsigned int tsu_rate;
+
+ tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
+ if (!IS_ERR(tsu_clk))
+ tsu_rate = clk_get_rate(tsu_clk);
+ /* try pclk instead */
+ else if (!IS_ERR(bp->pclk)) {
+ tsu_clk = bp->pclk;
+ tsu_rate = clk_get_rate(tsu_clk);
+ } else
+ return -ENOTSUPP;
+ return tsu_rate;
+}
+
+static s32 gem_get_ptp_max_adj(void)
+{
+ return 64000000;
+}
+
+static int gem_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
+ ethtool_op_get_ts_info(dev, info);
+ return 0;
+ }
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types =
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
+
+ return 0;
+}
+
+static struct macb_ptp_info gem_ptp_info = {
+ .ptp_init = gem_ptp_init,
+ .ptp_remove = gem_ptp_remove,
+ .get_ptp_max_adj = gem_get_ptp_max_adj,
+ .get_tsu_rate = gem_get_tsu_rate,
+ .get_ts_info = gem_get_ts_info,
+ .get_hwtst = gem_get_hwtst,
+ .set_hwtst = gem_set_hwtst,
+};
+#endif
+
static int macb_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
@@ -2581,6 +2725,16 @@ static void macb_configure_caps(struct macb *bp,
dcfg = gem_readl(bp, DCFG2);
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
bp->caps |= MACB_CAPS_FIFO_MODE;
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if (gem_has_ptp(bp)) {
+ if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
+ pr_err("GEM doesn't support hardware ptp.\n");
+ else {
+ bp->hw_dma_cap |= HW_DMA_CAP_PTP;
+ bp->ptp_info = &gem_ptp_info;
+ }
+ }
+#endif
}
dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
@@ -2718,7 +2872,7 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
} else {
@@ -2729,7 +2883,7 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue->TBQPH = MACB_TBQPH;
#endif
}
@@ -3186,7 +3340,9 @@ static const struct macb_config np4_config = {
};
static const struct macb_config zynqmp_config = {
- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -3220,7 +3376,9 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
static const struct macb_config default_gem_config = {
- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -3309,19 +3467,17 @@ static int macb_probe(struct platform_device *pdev)
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
- bp->hw_dma_cap = HW_DMA_CAP_64B;
- } else
- bp->hw_dma_cap = HW_DMA_CAP_32B;
-#endif
-
spin_lock_init(&bp->lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ bp->hw_dma_cap |= HW_DMA_CAP_64B;
+ }
+#endif
platform_set_drvdata(pdev, dev);
dev->irq = platform_get_irq(pdev, 0);
@@ -3438,6 +3594,7 @@ static int macb_remove(struct platform_device *pdev)
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
clk_disable_unprepare(bp->rx_clk);
+ of_node_put(bp->phy_node);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
new file mode 100644
index 000000000000..67cca08472b7
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -0,0 +1,518 @@
+/*
+ * 1588 PTP support for Cadence GEM device.
+ *
+ * Copyright (C) 2017 Cadence Design Systems - http://www.cadence.com
+ *
+ * Authors: Rafal Ozieblo <rafalo@cadence.com>
+ * Bartosz Folta <bfolta@cadence.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/time64.h>
+#include <linux/ptp_classify.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
+#include <linux/circ_buf.h>
+#include <linux/spinlock.h>
+
+#include "macb.h"
+
+#define GEM_PTP_TIMER_NAME "gem-ptp-timer"
+
+static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
+ struct macb_dma_desc *desc)
+{
+ if (bp->hw_dma_cap == HW_DMA_CAP_PTP)
+ return (struct macb_dma_desc_ptp *)
+ ((u8 *)desc + sizeof(struct macb_dma_desc));
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP)
+ return (struct macb_dma_desc_ptp *)
+ ((u8 *)desc + sizeof(struct macb_dma_desc)
+ + sizeof(struct macb_dma_desc_64));
+ return NULL;
+}
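
For illustration, these are the descriptor layouts the helper above distinguishes (the struct names are the driver's own; the boxes are schematic). The optional 64-bit address extension sits between the base descriptor and the PTP words, which is why the PTP area begins at a different offset in each mode:

	HW_DMA_CAP_PTP:     [macb_dma_desc][macb_dma_desc_ptp]
	HW_DMA_CAP_64B_PTP: [macb_dma_desc][macb_dma_desc_64][macb_dma_desc_ptp]
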
+
+static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
+ unsigned long flags;
+ long first, second;
+ u32 secl, sech;
+
+ spin_lock_irqsave(&bp->tsu_clk_lock, flags);
+ first = gem_readl(bp, TN);
+ secl = gem_readl(bp, TSL);
+ sech = gem_readl(bp, TSH);
+ second = gem_readl(bp, TN);
+
+ /* test for nsec rollover */
+ if (first > second) {
+ /* if so, use later read & re-read seconds
+ * (assume all done within 1s)
+ */
+ ts->tv_nsec = gem_readl(bp, TN);
+ secl = gem_readl(bp, TSL);
+ sech = gem_readl(bp, TSH);
+ } else {
+ ts->tv_nsec = first;
+ }
+
+ spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
+ ts->tv_sec = (((u64)sech << GEM_TSL_SIZE) | secl)
+ & TSU_SEC_MAX_VAL;
+ return 0;
+}
+
+static int gem_tsu_set_time(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
+ unsigned long flags;
+ u32 ns, sech, secl;
+
+ secl = (u32)ts->tv_sec;
+ sech = (ts->tv_sec >> GEM_TSL_SIZE) & ((1 << GEM_TSH_SIZE) - 1);
+ ns = ts->tv_nsec;
+
+ spin_lock_irqsave(&bp->tsu_clk_lock, flags);
+
+	/* TSH doesn't latch the time, and these writes are not atomic */
+ gem_writel(bp, TN, 0); /* clear to avoid overflow */
+ gem_writel(bp, TSH, sech);
+ /* write lower bits 2nd, for synchronized secs update */
+ gem_writel(bp, TSL, secl);
+ gem_writel(bp, TN, ns);
+
+ spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
+
+ return 0;
+}
+
+static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
+{
+ unsigned long flags;
+
+	/* The tsu_timer_incr register must be written after
+	 * the tsu_timer_incr_sub_ns register; the write to
+	 * tsu_timer_incr is what causes the value in
+	 * tsu_timer_incr_sub_ns to take effect.
+	 */
+ spin_lock_irqsave(&bp->tsu_clk_lock, flags);
+ gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns));
+ gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
+ spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
+
+ return 0;
+}
+
+static int gem_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
+ struct tsu_incr incr_spec;
+ bool neg_adj = false;
+ u32 word;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* Adjustment is relative to base frequency */
+ incr_spec.sub_ns = bp->tsu_incr.sub_ns;
+ incr_spec.ns = bp->tsu_incr.ns;
+
+ /* scaling: unused(8bit) | ns(8bit) | fractions(16bit) */
+ word = ((u64)incr_spec.ns << GEM_SUBNSINCR_SIZE) + incr_spec.sub_ns;
+ adj = (u64)scaled_ppm * word;
+	/* Divide with rounding, equivalent to the floating-point
+	 * division (adj / USEC_PER_SEC) + 0.5
+	 */
+ adj += (USEC_PER_SEC >> 1);
+ adj >>= GEM_SUBNSINCR_SIZE; /* remove fractions */
+ adj = div_u64(adj, USEC_PER_SEC);
+ adj = neg_adj ? (word - adj) : (word + adj);
+
+ incr_spec.ns = (adj >> GEM_SUBNSINCR_SIZE)
+ & ((1 << GEM_NSINCR_SIZE) - 1);
+ incr_spec.sub_ns = adj & ((1 << GEM_SUBNSINCR_SIZE) - 1);
+ gem_tsu_incr_set(bp, &incr_spec);
+ return 0;
+}
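
To sanity-check the fixed-point arithmetic above, a minimal user-space model of the same steps follows. The 16-bit sub-ns width matches the "ns(8bit) | fractions(16bit)" layout comment in the code; the 125 MHz TSU (nominal 8.0 ns increment) and the +100 ppm request are illustrative assumptions, not hardware values.

#include <stdint.h>
#include <stdio.h>

#define SUBNS_BITS	16		/* assumed value of GEM_SUBNSINCR_SIZE */
#define USEC_PER_SEC	1000000ULL

int main(void)
{
	uint64_t word = 8ULL << SUBNS_BITS;	/* ns = 8, sub_ns = 0 */
	uint64_t scaled_ppm = 100ULL << 16;	/* +100 ppm, 16-bit fraction */
	uint64_t adj = scaled_ppm * word;

	adj += USEC_PER_SEC >> 1;		/* round */
	adj >>= SUBNS_BITS;			/* drop scaled_ppm's fraction bits */
	adj /= USEC_PER_SEC;			/* ppm -> absolute increment units */
	word += adj;

	/* prints "ns=8 sub_ns=52": the tick grows to 8 + 52/65536 ns
	 * (~8.000793 ns), i.e. ~99.2 ppm fast; one sub-ns unit here is
	 * worth ~1.9 ppm, which is the adjustment resolution
	 */
	printf("ns=%llu sub_ns=%llu\n",
	       (unsigned long long)(word >> SUBNS_BITS),
	       (unsigned long long)(word & ((1ULL << SUBNS_BITS) - 1)));
	return 0;
}
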
+
+static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
+ struct timespec64 now, then = ns_to_timespec64(delta);
+ u32 adj, sign = 0;
+
+ if (delta < 0) {
+ sign = 1;
+ delta = -delta;
+ }
+
+ if (delta > TSU_NSEC_MAX_VAL) {
+ gem_tsu_get_time(&bp->ptp_clock_info, &now);
+ if (sign)
+ now = timespec64_sub(now, then);
+ else
+ now = timespec64_add(now, then);
+
+ gem_tsu_set_time(&bp->ptp_clock_info,
+ (const struct timespec64 *)&now);
+ } else {
+ adj = (sign << GEM_ADDSUB_OFFSET) | delta;
+
+ gem_writel(bp, TA, adj);
+ }
+
+ return 0;
+}
+
+static int gem_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info gem_ptp_caps_template = {
+ .owner = THIS_MODULE,
+ .name = GEM_PTP_TIMER_NAME,
+ .max_adj = 0,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfine = gem_ptp_adjfine,
+ .adjtime = gem_ptp_adjtime,
+ .gettime64 = gem_tsu_get_time,
+ .settime64 = gem_tsu_set_time,
+ .enable = gem_ptp_enable,
+};
+
+static void gem_ptp_init_timer(struct macb *bp)
+{
+ u32 rem = 0;
+ u64 adj;
+
+ bp->tsu_incr.ns = div_u64_rem(NSEC_PER_SEC, bp->tsu_rate, &rem);
+ if (rem) {
+ adj = rem;
+ adj <<= GEM_SUBNSINCR_SIZE;
+ bp->tsu_incr.sub_ns = div_u64(adj, bp->tsu_rate);
+ } else {
+ bp->tsu_incr.sub_ns = 0;
+ }
+}
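
A worked instance of the split above, assuming purely for illustration a 300 MHz TSU clock:

	ns     = 1000000000 / 300000000         = 3   (remainder 100000000)
	sub_ns = (100000000 << 16) / 300000000  = 21845

so each TSU tick advances the clock by 3 + 21845/65536 ~= 3.3333 ns.
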
+
+static void gem_ptp_init_tsu(struct macb *bp)
+{
+ struct timespec64 ts;
+
+ /* 1. get current system time */
+ ts = ns_to_timespec64(ktime_to_ns(ktime_get_real()));
+
+ /* 2. set ptp timer */
+ gem_tsu_set_time(&bp->ptp_clock_info, &ts);
+
+	/* 3. set the PTP timer increment to its nominal (base) value */
+ gem_tsu_incr_set(bp, &bp->tsu_incr);
+
+ gem_writel(bp, TA, 0);
+}
+
+static void gem_ptp_clear_timer(struct macb *bp)
+{
+ bp->tsu_incr.sub_ns = 0;
+ bp->tsu_incr.ns = 0;
+
+ gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, 0));
+ gem_writel(bp, TI, GEM_BF(NSINCR, 0));
+ gem_writel(bp, TA, 0);
+}
+
+static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
+ u32 dma_desc_ts_2, struct timespec64 *ts)
+{
+ struct timespec64 tsu;
+
+ ts->tv_sec = (GEM_BFEXT(DMA_SECH, dma_desc_ts_2) << GEM_DMA_SECL_SIZE) |
+ GEM_BFEXT(DMA_SECL, dma_desc_ts_1);
+ ts->tv_nsec = GEM_BFEXT(DMA_NSEC, dma_desc_ts_1);
+
+ /* TSU overlapping workaround
+ * The timestamp only contains lower few bits of seconds,
+ * so add value from 1588 timer
+ */
+ gem_tsu_get_time(&bp->ptp_clock_info, &tsu);
+
+ /* If the top bit is set in the timestamp,
+ * but not in 1588 timer, it has rolled over,
+ * so subtract max size
+ */
+ if ((ts->tv_sec & (GEM_DMA_SEC_TOP >> 1)) &&
+ !(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
+ ts->tv_sec -= GEM_DMA_SEC_TOP;
+
+ ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
+
+ return 0;
+}
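
To make the rollover correction concrete, assume for illustration a 6-bit seconds field in the descriptor, i.e. GEM_DMA_SEC_TOP = 0x40 and a tested top bit of 0x20 (the real widths come from the register definitions):

	TSU readback:  tsu.tv_sec = 0x1041  (low 6 bits just wrapped to 0x01)
	descriptor:    ts->tv_sec = 0x3f    (captured right before the wrap)
	0x20 set in ts->tv_sec, clear in tsu.tv_sec  =>  ts->tv_sec -= 0x40  (= -1)
	ts->tv_sec += (tsu.tv_sec & ~0x3f)           =>  -1 + 0x1040 = 0x103f

0x103f is the second in which the frame was actually timestamped.
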
+
+void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
+ struct macb_dma_desc *desc)
+{
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ struct macb_dma_desc_ptp *desc_ptp;
+ struct timespec64 ts;
+
+ if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
+ desc_ptp = macb_ptp_desc(bp, desc);
+ gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ }
+}
+
+static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
+ struct macb_dma_desc_ptp *desc_ptp)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec64 ts;
+
+ gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
+}
+
+int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
+ struct macb_dma_desc *desc)
+{
+ unsigned long tail = READ_ONCE(queue->tx_ts_tail);
+ unsigned long head = queue->tx_ts_head;
+ struct macb_dma_desc_ptp *desc_ptp;
+ struct gem_tx_ts *tx_timestamp;
+
+ if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
+ return -EINVAL;
+
+ if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
+ return -ENOMEM;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ desc_ptp = macb_ptp_desc(queue->bp, desc);
+ tx_timestamp = &queue->tx_timestamps[head];
+ tx_timestamp->skb = skb;
+ tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
+ tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
+ /* move head */
+ smp_store_release(&queue->tx_ts_head,
+ (head + 1) & (PTP_TS_BUFFER_SIZE - 1));
+
+ schedule_work(&queue->tx_ts_task);
+ return 0;
+}
+
+static void gem_tx_timestamp_flush(struct work_struct *work)
+{
+ struct macb_queue *queue =
+ container_of(work, struct macb_queue, tx_ts_task);
+ unsigned long head, tail;
+ struct gem_tx_ts *tx_ts;
+
+ /* take current head */
+ head = smp_load_acquire(&queue->tx_ts_head);
+ tail = queue->tx_ts_tail;
+
+ while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
+ tx_ts = &queue->tx_timestamps[tail];
+ gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
+ /* cleanup */
+ dev_kfree_skb_any(tx_ts->skb);
+ /* remove old tail */
+ smp_store_release(&queue->tx_ts_tail,
+ (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
+ tail = queue->tx_ts_tail;
+ }
+}
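
The smp_load_acquire()/smp_store_release() pairing above is the classic single-producer/single-consumer ring from Documentation/circular-buffers.txt. A runnable user-space sketch of the same discipline, using C11 atomics (ring size and element type are arbitrary stand-ins):

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 16	/* power of two, like PTP_TS_BUFFER_SIZE */

static int buf[RING_SIZE];
static _Atomic unsigned int head, tail;

/* producer (cf. gem_ptp_txstamp): fill the slot, then publish the index */
static int push(int item)
{
	unsigned int h = atomic_load_explicit(&head, memory_order_relaxed);
	unsigned int t = atomic_load_explicit(&tail, memory_order_acquire);

	if (((h + 1) & (RING_SIZE - 1)) == t)
		return -1;		/* full */
	buf[h] = item;
	atomic_store_explicit(&head, (h + 1) & (RING_SIZE - 1),
			      memory_order_release);
	return 0;
}

/* consumer (cf. gem_tx_timestamp_flush): read the index, then the slot */
static int pop(int *item)
{
	unsigned int h = atomic_load_explicit(&head, memory_order_acquire);
	unsigned int t = atomic_load_explicit(&tail, memory_order_relaxed);

	if (h == t)
		return -1;		/* empty */
	*item = buf[t];
	atomic_store_explicit(&tail, (t + 1) & (RING_SIZE - 1),
			      memory_order_release);
	return 0;
}

int main(void)
{
	int v;

	push(42);
	if (!pop(&v))
		printf("popped %d\n", v);
	return 0;
}
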
+
+void gem_ptp_init(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
+ unsigned int q;
+
+ bp->ptp_clock_info = gem_ptp_caps_template;
+
+ /* nominal frequency and maximum adjustment in ppb */
+ bp->tsu_rate = bp->ptp_info->get_tsu_rate(bp);
+ bp->ptp_clock_info.max_adj = bp->ptp_info->get_ptp_max_adj();
+ gem_ptp_init_timer(bp);
+ bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &dev->dev);
+ if (IS_ERR(bp->ptp_clock)) {
+ pr_err("ptp clock register failed: %ld\n",
+ PTR_ERR(bp->ptp_clock));
+ bp->ptp_clock = NULL;
+ return;
+	}
+
+	if (!bp->ptp_clock) {
+		pr_err("ptp clock register failed\n");
+		return;
+	}
+
+ spin_lock_init(&bp->tsu_clk_lock);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue->tx_ts_head = 0;
+ queue->tx_ts_tail = 0;
+ INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
+ }
+
+ gem_ptp_init_tsu(bp);
+
+ dev_info(&bp->pdev->dev, "%s ptp clock registered.\n",
+ GEM_PTP_TIMER_NAME);
+}
+
+void gem_ptp_remove(struct net_device *ndev)
+{
+ struct macb *bp = netdev_priv(ndev);
+
+ if (bp->ptp_clock)
+ ptp_clock_unregister(bp->ptp_clock);
+
+ gem_ptp_clear_timer(bp);
+
+ dev_info(&bp->pdev->dev, "%s ptp clock unregistered.\n",
+ GEM_PTP_TIMER_NAME);
+}
+
+static int gem_ptp_set_ts_mode(struct macb *bp,
+ enum macb_bd_control tx_bd_control,
+ enum macb_bd_control rx_bd_control)
+{
+ gem_writel(bp, TXBDCTRL, GEM_BF(TXTSMODE, tx_bd_control));
+ gem_writel(bp, RXBDCTRL, GEM_BF(RXTSMODE, rx_bd_control));
+
+ return 0;
+}
+
+int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
+{
+ struct hwtstamp_config *tstamp_config;
+ struct macb *bp = netdev_priv(dev);
+
+ tstamp_config = &bp->tstamp_config;
+ if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
+ return -EOPNOTSUPP;
+
+	if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
+{
+ u32 reg_val;
+
+ reg_val = macb_readl(bp, NCR);
+
+ if (enable)
+ macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
+ else
+ macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));
+
+ return 0;
+}
+
+int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ enum macb_bd_control tx_bd_control = TSTAMP_DISABLED;
+ enum macb_bd_control rx_bd_control = TSTAMP_DISABLED;
+ struct hwtstamp_config *tstamp_config;
+ struct macb *bp = netdev_priv(dev);
+ u32 regval;
+
+ tstamp_config = &bp->tstamp_config;
+ if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(tstamp_config, ifr->ifr_data,
+ sizeof(*tstamp_config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (tstamp_config->flags)
+ return -EINVAL;
+
+ switch (tstamp_config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+	case HWTSTAMP_TX_ONESTEP_SYNC:
+		if (gem_ptp_set_one_step_sync(bp, 1) != 0)
+			return -ERANGE;
+		/* fall through: one-step sync also needs TX timestamps on */
+	case HWTSTAMP_TX_ON:
+ tx_bd_control = TSTAMP_ALL_FRAMES;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (tstamp_config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ rx_bd_control = TSTAMP_ALL_PTP_FRAMES;
+ tstamp_config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ regval = macb_readl(bp, NCR);
+ macb_writel(bp, NCR, (regval | MACB_BIT(SRTSM)));
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ rx_bd_control = TSTAMP_ALL_FRAMES;
+ tstamp_config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ tstamp_config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ if (gem_ptp_set_ts_mode(bp, tx_bd_control, rx_bd_control) != 0)
+ return -ERANGE;
+
+	if (copy_to_user(ifr->ifr_data, tstamp_config, sizeof(*tstamp_config)))
+		return -EFAULT;
+
+	return 0;
+}
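
The handler above implements the standard SIOCSHWTSTAMP interface, so it can be exercised from user space with the usual net_tstamp ioctl. A minimal sketch; the interface name "eth0" is a placeholder and error handling is pared down:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ONESTEP_SYNC,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else	/* the driver may widen the filter; report what it chose */
		printf("rx_filter in effect: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}
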
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 962dcbcef8b5..6081c3132135 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -493,9 +493,8 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
- /* set IPTR & DPTR */
- reg_val |=
- (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+ /* set DPTR */
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
/* reset BMODE */
reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
@@ -638,7 +637,7 @@ static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
- (droq->buffer_size | (OCT_RH_SIZE << 16)));
+ droq->buffer_size);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
droq->pkts_sent_reg =
@@ -1343,8 +1342,7 @@ int validate_cn23xx_pf_config_info(struct octeon_device *oct,
return 1;
}
- if (!(CFG_GET_OQ_INFO_PTR(conf23xx)) ||
- !(CFG_GET_OQ_REFILL_THRESHOLD(conf23xx))) {
+ if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
__func__);
return 1;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index 20f3d2adf0c2..9338a0008378 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -165,9 +165,8 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
reg_val =
octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
- /* set IPTR & DPTR */
- reg_val |=
- (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+ /* set DPTR */
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
/* reset BMODE */
reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
@@ -249,7 +248,7 @@ static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no)
octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count);
octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no),
- (droq->buffer_size | (OCT_RH_SIZE << 16)));
+ droq->buffer_size);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
droq->pkts_sent_reg =
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index bdec051107a6..b28253c96d97 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -209,9 +209,6 @@ void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
}
- /* / Select Info Ptr for length & data */
- octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);
-
/* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
@@ -314,7 +311,7 @@ void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);
octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
- (droq->buffer_size | (OCT_RH_SIZE << 16)));
+ droq->buffer_size);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
droq->pkts_sent_reg =
@@ -734,8 +731,7 @@ int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
__func__);
return 1;
}
- if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
- !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
+ if (!CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx)) {
dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
__func__);
return 1;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 796c2cbc11f6..adde7745d069 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -202,9 +202,13 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
netdev->name);
break;
- case OCTNET_CMD_ENABLE_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
- netdev->name);
+ case OCTNET_CMD_VLAN_FILTER_CTL:
+ if (nctrl->ncmd.s.param1)
+ dev_info(&oct->pci_dev->dev,
+ "%s VLAN filter enabled\n", netdev->name);
+ else
+ dev_info(&oct->pci_dev->dev,
+ "%s VLAN filter disabled\n", netdev->name);
break;
case OCTNET_CMD_ADD_VLAN_FILTER:
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index ba012427edd6..51583ae4b1eb 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -3591,6 +3591,10 @@ static netdev_features_t liquidio_fix_features(struct net_device *netdev,
(lio->dev_capability & NETIF_F_LRO))
request &= ~NETIF_F_LRO;
+ if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
+ request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
return request;
}
@@ -3603,14 +3607,14 @@ static int liquidio_set_features(struct net_device *netdev,
{
struct lio *lio = netdev_priv(netdev);
- if (!((netdev->features ^ features) & NETIF_F_LRO))
- return 0;
-
- if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
+ if ((features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO) &&
+ !(netdev->features & NETIF_F_LRO))
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
else if (!(features & NETIF_F_LRO) &&
- (lio->dev_capability & NETIF_F_LRO))
+ (lio->dev_capability & NETIF_F_LRO) &&
+ (netdev->features & NETIF_F_LRO))
liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
@@ -3629,6 +3633,17 @@ static int liquidio_set_features(struct net_device *netdev,
liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
OCTNET_CMD_RXCSUM_DISABLE);
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_ENABLE);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_DISABLE);
+
return 0;
}
@@ -3884,7 +3899,7 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
union oct_link_status *ls;
int i;
- if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
+ if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
recv_pkt->buffer_size[0],
recv_pkt->rh.r_nic_info.gmxport);
@@ -3892,7 +3907,8 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
}
gmxport = recv_pkt->rh.r_nic_info.gmxport;
- ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+ ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
+ OCT_DROQ_INFO_SIZE);
octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
for (i = 0; i < oct->ifcount; i++) {
@@ -4199,7 +4215,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
- liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_ENABLE);
if ((debug != -1) && (debug & NETIF_MSG_HW))
liquidio_set_feature(netdev,
@@ -4449,7 +4466,7 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
u64 *data, vf_num;
notice = recv_pkt->rh.r.ossp;
- data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]);
+ data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
/* the first 64-bit word of data is the vf_num */
vf_num = data[0];
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 1f7032614ae5..9b247102eb92 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -2718,7 +2718,7 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
int gmxport = 0;
int i;
- if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
+ if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
recv_pkt->buffer_size[0],
recv_pkt->rh.r_nic_info.gmxport);
@@ -2726,7 +2726,8 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
}
gmxport = recv_pkt->rh.r_nic_info.gmxport;
- ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+ ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
+ OCT_DROQ_INFO_SIZE);
octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 8ea2323d8d67..231dd7fbfb80 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -173,6 +173,8 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
/*------------------------- End Scatter/Gather ---------------------------*/
+#define OCTNET_FRM_LENGTH_SIZE 8
+
#define OCTNET_FRM_PTP_HEADER_SIZE 8
#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */
@@ -215,7 +217,7 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_VERBOSE_ENABLE 0x14
#define OCTNET_CMD_VERBOSE_DISABLE 0x15
-#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
+#define OCTNET_CMD_VLAN_FILTER_CTL 0x16
#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
@@ -230,6 +232,8 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_RXCSUM_DISABLE 0x1
#define OCTNET_CMD_TXCSUM_ENABLE 0x0
#define OCTNET_CMD_TXCSUM_DISABLE 0x1
+#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1
+#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0
/* RX(packets coming from wire) Checksum verification flags */
/* TCP/UDP csum */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index d29ebc531151..f229d792c2b3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -47,7 +47,7 @@
/* CN6xxx OQ configuration macros */
#define CN6XXX_MAX_OUTPUT_QUEUES 32
#define CN6XXX_MAX_OQ_DESCRIPTORS 2048
-#define CN6XXX_OQ_BUF_SIZE 1536
+#define CN6XXX_OQ_BUF_SIZE 1664
#define CN6XXX_OQ_PKTSPER_INTR ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
(CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
#define CN6XXX_OQ_REFIL_THRESHOLD ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
@@ -78,7 +78,7 @@
#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
#define CN23XX_MAX_OQ_DESCRIPTORS 512
-#define CN23XX_OQ_BUF_SIZE 1536
+#define CN23XX_OQ_BUF_SIZE 1664
#define CN23XX_OQ_PKTSPER_INTR 128
/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
#define CN23XX_OQ_REFIL_THRESHOLD 16
@@ -98,8 +98,6 @@
#define OCTEON_32BYTE_INSTR 32
#define OCTEON_64BYTE_INSTR 64
#define OCTEON_MAX_BASE_IOQ 4
-#define OCTEON_OQ_BUFPTR_MODE 0
-#define OCTEON_OQ_INFOPTR_MODE 1
#define OCTEON_DMA_INTR_PKT 64
#define OCTEON_DMA_INTR_TIME 1000
@@ -125,7 +123,6 @@
#define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs)
-#define CFG_GET_OQ_INFO_PTR(cfg) ((cfg)->oq.info_ptr)
#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr)
#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
@@ -266,9 +263,6 @@ struct octeon_oq_config {
*/
u64 refill_threshold:16;
- /** If set, the Output queue uses info-pointer mode. (Default: 1) */
- u64 info_ptr:32;
-
/* Max number of OQs available */
u64 max_oqs:8;
@@ -276,9 +270,6 @@ struct octeon_oq_config {
/* Max number of OQs available */
u64 max_oqs:8;
- /** If set, the Output queue uses info-pointer mode. (Default: 1) */
- u64 info_ptr:32;
-
/** The number of buffers that were consumed during packet processing by
* the driver on this Output queue before the driver attempts to
* replenish
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 53f38d05f7c2..e08f7600f986 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -724,13 +724,11 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num,
}
#define FBUF_SIZE (4 * 1024 * 1024)
-u8 fbuf[FBUF_SIZE];
int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
size_t size)
{
int ret = 0;
- u8 *p = fbuf;
u32 crc32_result;
u64 load_addr;
u32 image_len;
@@ -805,10 +803,8 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
else
size = FBUF_SIZE;
- memcpy(p, data, size);
-
/* download the image */
- octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
+ octeon_pci_write_core_mem(oct, load_addr, data, (u32)size);
data += size;
rem -= (u32)size;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 3b7cc9320deb..623e28ca736e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -51,7 +51,6 @@ static struct octeon_config default_cn66xx_conf = {
/** OQ attributes */
.oq = {
.max_oqs = CN6XXX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
.oq_intr_time = CN6XXX_OQ_INTR_TIME,
@@ -161,7 +160,6 @@ static struct octeon_config default_cn68xx_conf = {
/** OQ attributes */
.oq = {
.max_oqs = CN6XXX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
.oq_intr_time = CN6XXX_OQ_INTR_TIME,
@@ -328,7 +326,6 @@ static struct octeon_config default_cn68xx_210nv_conf = {
/** OQ attributes */
.oq = {
.max_oqs = CN6XXX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
.oq_intr_time = CN6XXX_OQ_INTR_TIME,
@@ -432,7 +429,6 @@ static struct octeon_config default_cn23xx_conf = {
/** OQ attributes */
.oq = {
.max_oqs = CN23XX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.pkts_per_intr = CN23XX_OQ_PKTSPER_INTR,
.refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN23XX_OQ_INTR_PKT,
@@ -1236,13 +1232,15 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
cs = &core_setup[oct->octeon_id];
- if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
+ if (recv_pkt->buffer_size[0] != (sizeof(*cs) + OCT_DROQ_INFO_SIZE)) {
dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
(u32)sizeof(*cs),
recv_pkt->buffer_size[0]);
}
- memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
+	memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE,
+	       sizeof(*cs));
+
strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
OCT_SERIAL_LEN);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index d3a6a1c28053..2e190deb2233 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -181,10 +181,7 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
droq->recv_buf_list[i].buffer = buf;
droq->recv_buf_list[i].data = get_rbd(buf);
- droq->info_list[i].length = 0;
-
- /* map ring buffers into memory */
- desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+ desc_ring[i].info_ptr = 0;
desc_ring[i].buffer_ptr =
lio_map_ring(droq->recv_buf_list[i].buffer);
}
@@ -205,9 +202,6 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
octeon_droq_destroy_ring_buffers(oct, droq);
vfree(droq->recv_buf_list);
- if (droq->info_base_addr)
- lio_free_info_buffer(oct, droq);
-
if (droq->desc_ring)
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
droq->desc_ring, droq->desc_ring_dma);
@@ -280,14 +274,6 @@ int octeon_init_droq(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
droq->max_count);
- droq->info_list = lio_alloc_info_buffer(oct, droq);
- if (!droq->info_list) {
- dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
- lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
- droq->desc_ring, droq->desc_ring_dma);
- return 1;
- }
-
droq->recv_buf_list = (struct octeon_recv_buffer *)
vmalloc_node(droq->max_count *
OCT_DROQ_RECVBUF_SIZE,
@@ -357,7 +343,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
u32 i, bytes_left;
struct octeon_skb_page_info *pg_info;
- info = &droq->info_list[idx];
+ info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data;
recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
if (!recv_info)
@@ -491,8 +477,6 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
desc_ring[droq->refill_idx].buffer_ptr =
lio_map_ring(droq->recv_buf_list[
droq->refill_idx].buffer);
- /* Reset any previous values in the length field. */
- droq->info_list[droq->refill_idx].length = 0;
droq->refill_idx = incr_index(droq->refill_idx, 1,
droq->max_count);
@@ -541,11 +525,7 @@ void octeon_droq_check_oom(struct octeon_droq *droq)
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
- u32 buf_cnt = 0;
-
- while (total_len > (buf_size * buf_cnt))
- buf_cnt++;
- return buf_cnt;
+	return DIV_ROUND_UP(total_len, buf_size);
}
static int
@@ -593,11 +573,12 @@ static inline void octeon_droq_drop_packets(struct octeon_device *oct,
struct octeon_droq_info *info;
for (i = 0; i < cnt; i++) {
- info = &droq->info_list[droq->read_idx];
+ info = (struct octeon_droq_info *)
+ droq->recv_buf_list[droq->read_idx].data;
octeon_swap_8B_data((u64 *)info, 2);
if (info->length) {
- info->length -= OCT_RH_SIZE;
+ info->length += OCTNET_FRM_LENGTH_SIZE;
droq->stats.bytes_received += info->length;
buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
(u32)info->length);
@@ -629,7 +610,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
struct octeon_skb_page_info *pg_info;
void *buf;
- info = &droq->info_list[droq->read_idx];
+ info = (struct octeon_droq_info *)
+ droq->recv_buf_list[droq->read_idx].data;
octeon_swap_8B_data((u64 *)info, 2);
if (!info->length) {
@@ -643,9 +625,10 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
}
-	/* Len of resp hdr in included in the received data len. */
+	/* Len of resp hdr is included in the received data len. */
- info->length -= OCT_RH_SIZE;
rh = &info->rh;
+ info->length += OCTNET_FRM_LENGTH_SIZE;
+ rh->r_dh.len += (ROUNDUP8(OCT_DROQ_INFO_SIZE) / sizeof(u64));
total_len += (u32)info->length;
if (opcode_slow_path(rh)) {
u32 buf_cnt;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 9781577115e7..6efd139b894d 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -51,11 +51,11 @@ struct octeon_droq_desc {
* about the packet.
*/
struct octeon_droq_info {
- /** The Output Receive Header. */
- union octeon_rh rh;
-
/** The Length of the packet. */
u64 length;
+
+ /** The Output Receive Header. */
+ union octeon_rh rh;
};
#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
@@ -294,9 +294,6 @@ struct octeon_droq {
*/
u32 max_empty_descs;
- /** The 8B aligned info ptrs begin from this address. */
- struct octeon_droq_info *info_list;
-
/** The receive buffer list. This list has the virtual addresses of the
* buffers.
*/
@@ -324,15 +321,6 @@ struct octeon_droq {
/** DMA mapped address of the DROQ descriptor ring. */
size_t desc_ring_dma;
- /** Info ptr list are allocated at this virtual address. */
- void *info_base_addr;
-
- /** DMA mapped address of the info list */
- dma_addr_t info_list_dma;
-
- /** Allocated size of info list. */
- u32 info_alloc_size;
-
/** application context */
void *app_ctx;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 5cd96e7d426c..4c85ae643b7b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -167,10 +167,10 @@ octeon_pci_read_core_mem(struct octeon_device *oct,
void
octeon_pci_write_core_mem(struct octeon_device *oct,
u64 coreaddr,
- u8 *buf,
+ const u8 *buf,
u32 len)
{
- __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 0);
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)buf, len, 0);
}
u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
index bae2fdd89503..47a3ff5f9b1e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
@@ -66,7 +66,7 @@ octeon_pci_read_core_mem(struct octeon_device *oct,
void
octeon_pci_write_core_mem(struct octeon_device *oct,
u64 coreaddr,
- u8 *buf,
+ const u8 *buf,
u32 len);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 4b3507972243..ec8504b2942d 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -356,29 +356,6 @@ static inline void tx_buffer_free(void *buffer)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
-static inline void *
-lio_alloc_info_buffer(struct octeon_device *oct,
- struct octeon_droq *droq)
-{
- void *virt_ptr;
-
- virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE),
- &droq->info_list_dma);
- if (virt_ptr) {
- droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE;
- droq->info_base_addr = virt_ptr;
- }
-
- return virt_ptr;
-}
-
-static inline void lio_free_info_buffer(struct octeon_device *oct,
- struct octeon_droq *droq)
-{
- lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
- droq->info_list_dma);
-}
-
static inline
void *get_rbd(struct sk_buff *skb)
{
@@ -392,12 +369,6 @@ void *get_rbd(struct sk_buff *skb)
}
static inline u64
-lio_map_ring_info(struct octeon_droq *droq, u32 i)
-{
- return droq->info_list_dma + (i * sizeof(struct octeon_droq_info));
-}
-
-static inline u64
lio_map_ring(void *buf)
{
dma_addr_t dma_addr;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 573755b0a51b..49b80da51ba7 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -227,15 +227,14 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
nic->speed = mbx.link_status.speed;
nic->mac_type = mbx.link_status.mac_type;
if (nic->link_up) {
- netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
- nic->netdev->name, nic->speed,
+ netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
+ nic->speed,
nic->duplex == DUPLEX_FULL ?
- "Full duplex" : "Half duplex");
+ "Full" : "Half");
netif_carrier_on(nic->netdev);
netif_tx_start_all_queues(nic->netdev);
} else {
- netdev_info(nic->netdev, "%s: Link is Down\n",
- nic->netdev->name);
+ netdev_info(nic->netdev, "Link is Down\n");
netif_carrier_off(nic->netdev);
netif_tx_stop_all_queues(nic->netdev);
}
@@ -721,8 +720,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
return;
if (netif_msg_pktdata(nic)) {
- netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
- skb, skb->len);
+ netdev_info(nic->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb->len, true);
}
@@ -854,10 +852,8 @@ done:
netif_tx_wake_queue(txq);
nic = nic->pnicvf;
this_cpu_inc(nic->drv_stats->txq_wake);
- if (netif_msg_tx_err(nic))
- netdev_warn(netdev,
- "%s: Transmit queue wakeup SQ%d\n",
- netdev->name, txq_idx);
+ netif_warn(nic, tx_err, netdev,
+ "Transmit queue wakeup SQ%d\n", txq_idx);
}
}
@@ -928,9 +924,8 @@ static void nicvf_handle_qs_err(unsigned long data)
static void nicvf_dump_intr_status(struct nicvf *nic)
{
- if (netif_msg_intr(nic))
- netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
- nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
+ netif_info(nic, intr, nic->netdev, "interrupt status 0x%llx\n",
+ nicvf_reg_read(nic, NIC_VF_INT));
}
static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
@@ -1212,10 +1207,8 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_tx_wake_queue(txq);
} else {
this_cpu_inc(nic->drv_stats->txq_stop);
- if (netif_msg_tx_err(nic))
- netdev_warn(netdev,
- "%s: Transmit ring full, stopping SQ%d\n",
- netdev->name, qid);
+ netif_warn(nic, tx_err, netdev,
+ "Transmit ring full, stopping SQ%d\n", qid);
}
return NETDEV_TX_BUSY;
}
@@ -1600,9 +1593,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
{
struct nicvf *nic = netdev_priv(dev);
- if (netif_msg_tx_err(nic))
- netdev_warn(dev, "%s: Transmit timed out, resetting\n",
- dev->name);
+ netif_warn(nic, tx_err, dev, "Transmit timed out, resetting\n");
this_cpu_inc(nic->drv_stats->tx_timeout);
schedule_work(&nic->reset_task);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 2b181762ad49..d4496e9afcdf 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1811,11 +1811,9 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
- if (netif_msg_rx_err(nic))
- netdev_err(nic->netdev,
- "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
- nic->netdev->name,
- cqe_rx->err_level, cqe_rx->err_opcode);
+ netif_err(nic, rx_err, nic->netdev,
+ "RX error CQE err_level 0x%x err_opcode 0x%x\n",
+ cqe_rx->err_level, cqe_rx->err_opcode);
switch (cqe_rx->err_opcode) {
case CQ_RX_ERROP_RE_PARTIAL:
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index e1a50c87c9a9..0bc6a4ffce30 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -471,8 +471,7 @@ static int init_tp_parity(struct adapter *adap)
if (!skb)
goto alloc_skb_fail;
- req = __skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
req->mtu_idx = NMTUS - 1;
@@ -495,8 +494,7 @@ static int init_tp_parity(struct adapter *adap)
if (!skb)
goto alloc_skb_fail;
- req = __skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
req->params = htonl(V_L2T_W_IDX(i));
@@ -518,8 +516,7 @@ static int init_tp_parity(struct adapter *adap)
if (!skb)
goto alloc_skb_fail;
- req = __skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
req->l2t_idx = htonl(V_L2T_W_IDX(i));
@@ -538,8 +535,7 @@ static int init_tp_parity(struct adapter *adap)
if (!skb)
goto alloc_skb_fail;
- greq = __skb_put(skb, sizeof(*greq));
- memset(greq, 0, sizeof(*greq));
+ greq = __skb_put_zero(skb, sizeof(*greq));
greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
greq->mask = cpu_to_be64(1);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 1b9d154f1149..e2d342647b19 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2282,7 +2282,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
if (!skb)
goto no_mem;
- memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
+ __skb_put_data(skb, r, AN_PKT_SIZE);
skb->data[0] = CPL_ASYNC_NOTIF;
rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
q->async_notif++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index b7a92ebab3cf..451c138ff6ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -362,6 +362,11 @@ struct adapter_params {
unsigned int max_ordird_qp; /* Max read depth per RDMA QP */
unsigned int max_ird_adapter; /* Max read depth per adapter */
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
+
+	/* MPS Buffer Group Map per Port. Bit i is set if buffer group i
+	 * is used by the Port.
+	 */
+	u8 mps_bg_map[MAX_NPORTS];
};
/* State needed to monitor the forward progress of SGE Ingress DMA activities
@@ -1434,7 +1439,8 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
u32 t4_read_rss_pf_map(struct adapter *adapter);
u32 t4_read_rss_pf_mask(struct adapter *adapter);
-unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx);
+unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
@@ -1494,9 +1500,12 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val);
+int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val);
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
- u32 *val, int rw);
+ u32 *val, int rw, bool sleep_ok);
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int nparams, const u32 *params,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index a0fab65e80e8..45b5853ca2f1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -231,8 +231,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
}
}
- fwr = __skb_put(skb, sizeof(*fwr));
- memset(fwr, 0, sizeof(*fwr));
+ fwr = __skb_put_zero(skb, sizeof(*fwr));
/* It would be nice to put most of the following in t4_hw.c but most
* of the work is translating the cxgbtool ch_filter_specification
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 15fb284eafc0..f41507e040da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -824,9 +824,12 @@ static int setup_sge_queues(struct adapter *adap)
{
int err, i, j;
struct sge *s = &adap->sge;
- struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ struct sge_uld_rxq_info *rxq_info = NULL;
unsigned int cmplqid = 0;
+ if (is_uld(adap))
+ rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
struct port_info *pi = netdev_priv(dev);
@@ -840,8 +843,8 @@ static int setup_sge_queues(struct adapter *adap)
adap->msi_idx, &q->fl,
t4_ethrx_handler,
NULL,
- t4_get_mps_bg_map(adap,
- pi->tx_chan));
+ t4_get_tp_ch_map(adap,
+ pi->tx_chan));
if (err)
goto freeout;
q->rspq.idx = j;
@@ -2187,9 +2190,10 @@ static int cxgb_up(struct adapter *adap)
{
int err;
+ mutex_lock(&uld_mutex);
err = setup_sge_queues(adap);
if (err)
- goto out;
+ goto rel_lock;
err = setup_rss(adap);
if (err)
goto freeq;
@@ -2213,7 +2217,6 @@ static int cxgb_up(struct adapter *adap)
goto irq_err;
}
- mutex_lock(&uld_mutex);
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
@@ -2226,13 +2229,15 @@ static int cxgb_up(struct adapter *adap)
#endif
/* Initialize hash mac addr list*/
INIT_LIST_HEAD(&adap->mac_hlist);
- out:
return err;
+
irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
freeq:
t4_free_sge_resources(adap);
- goto out;
+ rel_lock:
+ mutex_unlock(&uld_mutex);
+ return err;
}
static void cxgb_down(struct adapter *adapter)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index d5e316d5481e..82bf7aac6cdb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3540,7 +3540,7 @@ int t4_load_phy_fw(struct adapter *adap,
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
val = phy_fw_size;
ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
- &param, &val, 1);
+ &param, &val, 1, true);
if (ret < 0)
return ret;
mtype = val >> 8;
@@ -5440,30 +5440,155 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
}
/**
- * t4_get_mps_bg_map - return the buffer groups associated with a port
+ * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
- * @adap: the adapter
+ * @adapter: the adapter
- * @idx: the port index
+ * @pidx: the port index
+ *
+ * Computes and returns a bitmap indicating which MPS buffer groups are
+ * associated with the given Port. Bit i is set if buffer group i is
+ * used by the Port.
+ */
+static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
+ int pidx)
+{
+ unsigned int chip_version, nports;
+
+ chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << (2 * pidx);
+ }
+ break;
+ }
+
+	dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %#x, Nports %d\n",
+ chip_version, nports);
+
+ return 0;
+}
+
+/**
+ * t4_get_mps_bg_map - return the buffer groups associated with a port
+ * @adapter: the adapter
+ * @pidx: the port index
*
* Returns a bitmap indicating which MPS buffer groups are associated
- * with the given port. Bit i is set if buffer group i is used by the
- * port.
+ * with the given Port. Bit i is set if buffer group i is used by the
+ * Port.
*/
-unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
{
- u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
+ u8 *mps_bg_map;
+ unsigned int nports;
+
+ nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
+ if (pidx >= nports) {
+ CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+	/* If we've already retrieved/computed this, just return the result */
+ mps_bg_map = adapter->params.mps_bg_map;
+ if (mps_bg_map[pidx])
+ return mps_bg_map[pidx];
+
+ /* Newer Firmware can tell us what the MPS Buffer Group Map is.
+ * If we're talking to such Firmware, let it tell us. If the new
+ * API isn't supported, revert back to old hardcoded way. The value
+ * obtained from Firmware is encoded in below format:
+ *
+ * val = (( MPSBGMAP[Port 3] << 24 ) |
+ * ( MPSBGMAP[Port 2] << 16 ) |
+ * ( MPSBGMAP[Port 1] << 8 ) |
+ * ( MPSBGMAP[Port 0] << 0 ))
+ */
+ if (adapter->flags & FW_OK) {
+ u32 param, val;
+ int ret;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
+ ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
+ 0, 1, &param, &val);
+ if (!ret) {
+ int p;
+
+ /* Store the BG Map for all of the Ports in order to
+ * avoid more calls to the Firmware in the future.
+ */
+ for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
+ mps_bg_map[p] = val & 0xff;
- if (n == 0)
- return idx == 0 ? 0xf : 0;
- /* In T6 (which is a 2 port card),
- * port 0 is mapped to channel 0 and port 1 is mapped to channel 1.
- * For 2 port T4/T5 adapter,
- * port 0 is mapped to channel 0 and 1,
- * port 1 is mapped to channel 2 and 3.
+ return mps_bg_map[pidx];
+ }
+ }
+
+ /* Either we're not talking to the Firmware or we're dealing with
+ * older Firmware which doesn't support the new API to get the MPS
+ * Buffer Group Map. Fall back to computing it ourselves.
*/
- if ((n == 1) &&
- (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
- return idx < 2 ? (3 << (2 * idx)) : 0;
- return 1 << idx;
+ mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
+ return mps_bg_map[pidx];
+}
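
A quick stand-alone model of the firmware-reply decode above; 0x00000c03 is a made-up example value corresponding to a 2-port T4/T5 card (port 0 in buffer groups {0,1}, port 1 in {2,3}), matching what compute_mps_bg_map() would derive:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x00000c03;	/* assumed example FW reply */
	unsigned char mps_bg_map[4];
	int p;

	/* one byte per port, port 0 in the least significant byte */
	for (p = 0; p < 4; p++, val >>= 8)
		mps_bg_map[p] = val & 0xff;

	for (p = 0; p < 4; p++)
		printf("port %d -> MPS BG map 0x%x\n", p, mps_bg_map[p]);
	return 0;
}
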
+
+/**
+ * t4_get_tp_ch_map - return TP ingress channels associated with a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Returns a bitmap indicating which TP Ingress Channels are associated
+ * with a given Port. Bit i is set if TP Ingress Channel i is used by
+ * the Port.
+ */
+unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+ unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
+
+ if (pidx >= nports) {
+ dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ /* Note that this happens to be the same values as the MPS
+ * Buffer Group Map for these Chips. But we replicate the code
+ * here because they're really separate concepts.
+ */
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << pidx;
+ }
+ break;
+ }
+
+	dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %#x, Nports %d\n",
+ chip_version, nports);
+ return 0;
}
/**
@@ -6578,13 +6703,14 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
* @params: the parameter names
* @val: the parameter values
* @rw: Write and read flag
+ * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
*
* Reads the value of FW or device parameters. Up to 7 parameters can be
* queried at once.
*/
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
- u32 *val, int rw)
+ u32 *val, int rw, bool sleep_ok)
{
int i, ret;
struct fw_params_cmd c;
@@ -6607,7 +6733,7 @@ int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
p++;
}
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
if (ret == 0)
for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
*val++ = be32_to_cpu(*p);
@@ -6618,7 +6744,16 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val)
{
- return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+ return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
+ true);
+}
+
+int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
+ false);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index be7041f6cf71..99987d8e437e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -192,6 +192,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6015),
CH_PCI_ID_TABLE_FENTRY(0x6080),
CH_PCI_ID_TABLE_FENTRY(0x6081),
+ CH_PCI_ID_TABLE_FENTRY(0x6082), /* Custom T6225-CR SFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR QSFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index f47461aa658b..f2ffc1f6b8be 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1123,6 +1123,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
FW_PARAMS_PARAM_DEV_FWCACHE = 0x18,
FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C,
+ FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E,
};
/*
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 2b23f46b34d3..ba032ac9ae86 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.31"
+#define DRV_VERSION "2.3.0.42"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -47,7 +47,7 @@
struct enic_msix_entry {
int requested;
- char devname[IFNAMSIZ];
+ char devname[IFNAMSIZ + 8];
irqreturn_t (*isr)(int, void *);
void *devid;
cpumask_var_t affinity_mask;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 6a9c8878aca0..d24ee1ad3be1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1737,7 +1737,7 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_rq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-rx-%u", netdev->name, i);
+ "%s-rx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[i];
}
@@ -1748,7 +1748,7 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_wq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-tx-%u", netdev->name, i);
+ "%s-tx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[wq];
}
@@ -1756,14 +1756,14 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_err_intr(enic);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-err", netdev->name);
+ "%s-err", netdev->name);
enic->msix[intr].isr = enic_isr_msix_err;
enic->msix[intr].devid = enic;
intr = enic_msix_notify_intr(enic);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-notify", netdev->name);
+ "%s-notify", netdev->name);
enic->msix[intr].isr = enic_isr_msix_notify;
enic->msix[intr].devid = enic;
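/* Sketch: why devname[IFNAMSIZ] forced the "%.11s" truncation above and
 * why IFNAMSIZ + 8 lets the full name through. IFNAMSIZ is 16 on Linux,
 * so a 15-character interface name plus the longest suffix used here
 * ("-notify", 7 chars) needs up to 23 bytes including the NUL. The
 * sample name is made up; standalone user-space demo, not driver code.
 */
#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

int main(void)
{
	const char *name = "enp175s0f4d1np2";	/* 15 chars, worst case */
	char small[IFNAMSIZ], big[IFNAMSIZ + 8];

	snprintf(small, sizeof(small), "%s-notify", name);
	snprintf(big, sizeof(big), "%s-notify", name);
	printf("%zu: %s\n", strlen(small), small);	/* truncated at 15 */
	printf("%zu: %s\n", strlen(big), big);		/* full 22 chars */
	return 0;
}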
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index a5501af6db99..757b873735a5 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
/* device used for DMA mapping */
- arch_setup_dma_ops(dev, 0, 0, NULL, false);
+ set_dma_ops(dev, get_dma_ops(&pdev->dev));
err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
if (err) {
dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index dc0850b3b517..8870a9a798ca 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -2,6 +2,7 @@ config FSL_FMAN
tristate "FMan support"
depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
+ depends on HAS_DMA
select PHYLIB
default n
help
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 0b31f8502ada..6e67d22fd0d5 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
+ set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
+
ret = platform_device_add_data(pdev, &data, sizeof(data));
if (ret)
goto err;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a79e257bc338..c4b4b0a1bbf0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1718,7 +1718,7 @@ static int gfar_restore(struct device *dev)
return 0;
}
-static struct dev_pm_ops gfar_pm_ops = {
+static const struct dev_pm_ops gfar_pm_ops = {
.suspend = gfar_suspend,
.resume = gfar_resume,
.freeze = gfar_suspend,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index af1b15cc6a7f..00e57bbaf122 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
/* Force 1000M Link, Default is 0x0200 */
phy_write(phy_dev, 7, 0x20C);
- phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
- /* Enable PHY loop-back */
+ /* Power up fiber */
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+ val = phy_read(phy_dev, COPPER_CONTROL_REG);
+ val &= ~PHY_POWER_DOWN;
+ phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
+ /* Enable PHY loopback */
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
val = phy_read(phy_dev, COPPER_CONTROL_REG);
val |= PHY_LOOP_BACK;
val &= ~PHY_POWER_DOWN;
@@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
phy_write(phy_dev, 1, 0x400);
phy_write(phy_dev, 7, 0x200);
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+ val = phy_read(phy_dev, COPPER_CONTROL_REG);
+ val |= PHY_POWER_DOWN;
+ phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
phy_write(phy_dev, 9, 0xF00);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 9a74c4e2e193..3e0a695537e2 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1914,7 +1914,7 @@ static struct vio_device_id ibmveth_device_table[] = {
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
-static struct dev_pm_ops ibmveth_pm_ops = {
+static const struct dev_pm_ops ibmveth_pm_ops = {
.resume = ibmveth_resume
};
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 722daf55d757..a3e694679635 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -163,16 +163,6 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
return rc;
}
-static void reset_long_term_buff(struct ibmvnic_adapter *adapter,
- struct ibmvnic_long_term_buff *ltb)
-{
- memset(ltb->buff, 0, ltb->size);
-
- init_completion(&adapter->fw_done);
- send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
- wait_for_completion(&adapter->fw_done);
-}
-
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
@@ -193,6 +183,12 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
send_request_map(adapter, ltb->addr,
ltb->size, ltb->map_id);
wait_for_completion(&adapter->fw_done);
+
+ if (adapter->fw_done_rc) {
+ dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
+ adapter->fw_done_rc);
+ return -1;
+ }
return 0;
}
@@ -210,6 +206,24 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
+static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb)
+{
+ memset(ltb->buff, 0, ltb->size);
+
+ init_completion(&adapter->fw_done);
+ send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+ wait_for_completion(&adapter->fw_done);
+
+ if (adapter->fw_done_rc) {
+ dev_info(&adapter->vdev->dev,
+ "Reset failed, attempting to free and reallocate buffer\n");
+ free_long_term_buff(adapter, ltb);
+ return alloc_long_term_buff(adapter, ltb, ltb->size);
+ }
+ return 0;
+}
+
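/* Sketch (user-space analogue, not driver code): the shape of the
 * fw_done/fw_done_rc handshake used above - the responder records an
 * rc before signalling completion, and the waiter inspects that rc
 * after waking. Single-threaded and busy-waiting for brevity; the
 * kernel version sleeps on a struct completion.
 */
#include <stdio.h>

struct fake_adapter {
	int done;	/* stands in for struct completion fw_done */
	int rc;		/* stands in for fw_done_rc */
};

static void crq_response(struct fake_adapter *a, int rc)
{
	a->rc = rc;	/* record the outcome first ... */
	a->done = 1;	/* ... then "complete" */
}

static int map_buffer(struct fake_adapter *a)
{
	a->done = 0;
	a->rc = 0;
	crq_response(a, -1);		/* firmware rejects the map */
	while (!a->done)		/* wait_for_completion() */
		;
	return a->rc ? -1 : 0;
}

int main(void)
{
	struct fake_adapter a;

	printf("map_buffer: %d\n", map_buffer(&a));	/* -1 */
	return 0;
}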
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
@@ -366,13 +380,15 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
int rx_scrqs;
- int i, j;
+ int i, j, rc;
rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
for (i = 0; i < rx_scrqs; i++) {
rx_pool = &adapter->rx_pool[i];
- reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+ rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+ if (rc)
+ return rc;
for (j = 0; j < rx_pool->size; j++)
rx_pool->free_map[j] = j;
@@ -494,13 +510,15 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
int tx_scrqs;
- int i, j;
+ int i, j, rc;
tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
for (i = 0; i < tx_scrqs; i++) {
tx_pool = &adapter->tx_pool[i];
- reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+ rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+ if (rc)
+ return rc;
memset(tx_pool->tx_buff, 0,
adapter->req_tx_entries_per_subcrq *
@@ -745,12 +763,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
if (rc)
return rc;
- rc = init_sub_crq_irqs(adapter);
- if (rc) {
- netdev_err(netdev, "failed to initialize sub crq irqs\n");
- return -1;
- }
-
rc = init_stats_token(adapter);
if (rc)
return rc;
@@ -1785,7 +1797,6 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
return rc;
}
- rc = init_sub_crq_irqs(adapter);
return rc;
}
@@ -3075,36 +3086,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
return 0;
}
-static void handle_request_map_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- u8 map_id = crq->request_map_rsp.map_id;
- int tx_subcrqs;
- int rx_subcrqs;
- long rc;
- int i;
-
- tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
- rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-
- rc = crq->request_map_rsp.rc.code;
- if (rc) {
- dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
- adapter->map_id--;
- /* need to find and zero tx/rx_pool map_id */
- for (i = 0; i < tx_subcrqs; i++) {
- if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
- adapter->tx_pool[i].long_term_buff.map_id = 0;
- }
- for (i = 0; i < rx_subcrqs; i++) {
- if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
- adapter->rx_pool[i].long_term_buff.map_id = 0;
- }
- }
- complete(&adapter->fw_done);
-}
-
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -3385,7 +3366,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
handle_query_map_rsp(crq, adapter);
break;
case REQUEST_MAP_RSP:
- handle_request_map_rsp(crq, adapter);
+ adapter->fw_done_rc = crq->request_map_rsp.rc.code;
+ complete(&adapter->fw_done);
break;
case REQUEST_UNMAP_RSP:
handle_request_unmap_rsp(crq, adapter);
@@ -3680,6 +3662,13 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
if (rc) {
dev_err(dev, "Initialization of sub crqs failed\n");
release_crq_queue(adapter);
+ return rc;
+ }
+
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev, "Failed to initialize sub crq irqs\n");
+ release_crq_queue(adapter);
}
return rc;
@@ -3737,7 +3726,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
do {
rc = ibmvnic_init(adapter);
- if (rc != EAGAIN) {
+ if (rc && rc != EAGAIN) {
free_netdev(netdev);
return rc;
}
@@ -3859,6 +3848,9 @@ static int ibmvnic_resume(struct device *dev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int i;
+ if (adapter->state != VNIC_OPEN)
+ return 0;
+
/* kick the interrupt handlers just in case we lost an interrupt */
for (i = 0; i < adapter->req_rx_queues; i++)
ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 7e2300e64a47..8eff6e15f4bb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -595,7 +595,7 @@ struct ibmvnic_request_map_rsp {
u8 cmd;
u8 reserved1;
u8 map_id;
- u8 reserved2[4];
+ u8 reserved2[8];
struct ibmvnic_rc rc;
} __packed __aligned(8);
@@ -988,6 +988,7 @@ struct ibmvnic_adapter {
spinlock_t error_list_lock;
struct completion fw_done;
+ int fw_done_rc;
/* partner capabilities */
u64 min_tx_queues;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 395ca94faf80..d616f698e155 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -103,6 +103,12 @@
(I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
+#define I40E_OEM_EETRACK_ID 0xffffffff
+#define I40E_OEM_GEN_SHIFT 24
+#define I40E_OEM_SNAP_MASK 0x00ff0000
+#define I40E_OEM_SNAP_SHIFT 16
+#define I40E_OEM_RELEASE_MASK 0x0000ffff
+
/* The values in here are decimal coded as hex as is the case in the NVM map */
#define I40E_CURRENT_NVM_VERSION_HI 0x2
#define I40E_CURRENT_NVM_VERSION_LO 0x40
@@ -516,9 +522,8 @@ struct i40e_pf {
bool ptp_tx;
bool ptp_rx;
u16 rss_table_size; /* HW RSS table size */
- /* These are only valid in NPAR modes */
- u32 npar_max_bw;
- u32 npar_min_bw;
+ u32 max_bw;
+ u32 min_bw;
u32 ioremap_len;
u32 fd_inv;
@@ -629,6 +634,7 @@ struct i40e_vsi {
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
struct i40e_ring **tx_rings;
+ struct i40e_ring **xdp_rings; /* XDP Tx rings */
u32 active_filters;
u32 promisc_threshold;
@@ -645,6 +651,8 @@ struct i40e_vsi {
u16 max_frame;
u16 rx_buf_len;
+ struct bpf_prog *xdp_prog;
+
/* List of q_vectors allocated to this VSI */
struct i40e_q_vector **q_vectors;
int num_q_vectors;
@@ -732,22 +740,36 @@ static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
{
static char buf[32];
u32 full_ver;
- u8 ver, patch;
- u16 build;
full_ver = hw->nvm.oem_ver;
- ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
- build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
- I40E_OEM_VER_BUILD_MASK);
- patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
-
- snprintf(buf, sizeof(buf),
- "%x.%02x 0x%x %d.%d.%d",
- (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
- I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
- I40E_NVM_VERSION_LO_SHIFT,
- hw->nvm.eetrack, ver, build, patch);
+
+ if (hw->nvm.eetrack == I40E_OEM_EETRACK_ID) {
+ u8 gen, snap;
+ u16 release;
+
+ gen = (u8)(full_ver >> I40E_OEM_GEN_SHIFT);
+ snap = (u8)((full_ver & I40E_OEM_SNAP_MASK) >>
+ I40E_OEM_SNAP_SHIFT);
+ release = (u16)(full_ver & I40E_OEM_RELEASE_MASK);
+
+ snprintf(buf, sizeof(buf), "%x.%x.%x", gen, snap, release);
+ } else {
+ u8 ver, patch;
+ u16 build;
+
+ ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+ build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
+ I40E_OEM_VER_BUILD_MASK);
+ patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
+
+ snprintf(buf, sizeof(buf),
+ "%x.%02x 0x%x %d.%d.%d",
+ (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
+ I40E_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
+ I40E_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack, ver, build, patch);
+ }
return buf;
}
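/* Sketch: decoding an OEM oem_ver word exactly as the OEM branch of
 * i40e_nvm_version_str() above does - gen in bits 31:24, snap in bits
 * 23:16, release in bits 15:0. The sample value is made up; standalone
 * user-space demo.
 */
#include <stdio.h>
#include <stdint.h>

#define I40E_OEM_GEN_SHIFT	24
#define I40E_OEM_SNAP_MASK	0x00ff0000
#define I40E_OEM_SNAP_SHIFT	16
#define I40E_OEM_RELEASE_MASK	0x0000ffff

int main(void)
{
	uint32_t full_ver = 0x01020003;	/* gen 1, snap 2, release 3 */
	uint8_t gen = (uint8_t)(full_ver >> I40E_OEM_GEN_SHIFT);
	uint8_t snap = (uint8_t)((full_ver & I40E_OEM_SNAP_MASK) >>
				 I40E_OEM_SNAP_SHIFT);
	uint16_t release = (uint16_t)(full_ver & I40E_OEM_RELEASE_MASK);

	printf("%x.%x.%x\n", gen, snap, release);	/* 1.2.3 */
	return 0;
}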
@@ -968,8 +990,13 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
-i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+
+static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
+{
+ return !!vsi->xdp_prog;
+}
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 5eb04114e13f..5d5f422cbae5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -531,7 +531,7 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 36f694ccdc09..1b1e2acbd07f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -273,8 +273,8 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id)
if (!cdev || !cdev->client)
goto out;
if (!cdev->client->ops || !cdev->client->ops->vf_capable) {
- dev_info(&pf->pdev->dev,
- "Cannot locate client instance VF capability routine\n");
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF capability routine\n");
goto out;
}
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index cbad4eba7ae7..8e082a946411 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3614,11 +3614,15 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
- * @udp_port: the UDP port to add
+ * @udp_port: the UDP port to add in Host byte order
* @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
+ *
+ * Note: firmware expects the udp_port value in little-endian format; this
+ * function converts it from host byte order with cpu_to_le16().
**/
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 0fab3a9b51d9..55079fe3ed63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -390,6 +390,8 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
if (!dcbcfg->numapps)
return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
for (i = 0; i < dcbcfg->numapps; i++) {
u8 up, selector;
@@ -618,14 +620,17 @@ static void i40e_cee_to_dcb_v1_config(
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
for (i = 0; i < 4; i++) {
tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
- dcbcfg->etscfg.prioritytable[i*2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
I40E_CEE_PGID_PRIO_0_MASK) >>
I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 3d58762efbc0..9692a5294fa3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1299,6 +1299,17 @@ static void i40e_get_ringparam(struct net_device *netdev,
ring->rx_jumbo_pending = 0;
}
+static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
+{
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ return index < vsi->num_queue_pairs ||
+ (index >= vsi->alloc_queue_pairs &&
+ index < vsi->alloc_queue_pairs + vsi->num_queue_pairs);
+ }
+
+ return index < vsi->num_queue_pairs;
+}
+
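/* Sketch: which Tx ring indices the helper above treats as active when
 * XDP is on. With alloc_queue_pairs = 8 and num_queue_pairs = 4
 * (illustrative values), indices 0-3 are the regular Tx rings, 8-11 are
 * the XDP Tx clones, and 4-7 are unused holes. Standalone user-space
 * demo, not driver code.
 */
#include <stdio.h>
#include <stdbool.h>

static bool active(unsigned int idx, unsigned int num_qp,
		   unsigned int alloc_qp, bool xdp)
{
	if (xdp)
		return idx < num_qp ||
		       (idx >= alloc_qp && idx < alloc_qp + num_qp);
	return idx < num_qp;
}

int main(void)
{
	for (unsigned int i = 0; i < 12; i++)
		if (active(i, 4, 8, true))
			printf("ring %u active\n", i);	/* 0-3, 8-11 */
	return 0;
}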
static int i40e_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -1308,6 +1319,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 new_rx_count, new_tx_count;
+ u16 tx_alloc_queue_pairs;
int timeout = 50;
int i, err = 0;
@@ -1345,6 +1357,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
vsi->tx_rings[i]->count = new_tx_count;
vsi->rx_rings[i]->count = new_rx_count;
+ if (i40e_enabled_xdp_vsi(vsi))
+ vsi->xdp_rings[i]->count = new_tx_count;
}
goto done;
}
@@ -1354,20 +1368,24 @@ static int i40e_set_ringparam(struct net_device *netdev,
* to the Tx and Rx ring structs.
*/
- /* alloc updated Tx resources */
+ /* alloc updated Tx and XDP Tx resources */
+ tx_alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
vsi->tx_rings[0]->count, new_tx_count);
- tx_rings = kcalloc(vsi->alloc_queue_pairs,
+ tx_rings = kcalloc(tx_alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- /* clone ring and setup updated count */
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (!i40e_active_tx_ring_index(vsi, i))
+ continue;
+
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
/* the desc and bi pointers will be reallocated in the
@@ -1379,6 +1397,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (err) {
while (i) {
i--;
+ if (!i40e_active_tx_ring_index(vsi, i))
+ continue;
i40e_free_tx_resources(&tx_rings[i]);
}
kfree(tx_rings);
@@ -1446,9 +1466,11 @@ rx_unwind:
i40e_down(vsi);
if (tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_tx_resources(vsi->tx_rings[i]);
- *vsi->tx_rings[i] = tx_rings[i];
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (i40e_active_tx_ring_index(vsi, i)) {
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
+ }
}
kfree(tx_rings);
tx_rings = NULL;
@@ -1479,8 +1501,10 @@ rx_unwind:
free_tx:
/* error cleanup if the Rx allocations failed after getting Tx */
if (tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++)
- i40e_free_tx_resources(&tx_rings[i]);
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (i40e_active_tx_ring_index(vsi, i))
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ }
kfree(tx_rings);
tx_rings = NULL;
}
@@ -2687,6 +2711,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
u8 flow_pctype = 0;
u64 i_set, i_setc;
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev,
+ "Change of RSS hash input set is not supported when MFP mode is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 98fb644a580e..2db93d3f6d23 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,7 @@
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
+#include <linux/bpf.h>
/* Local includes */
#include "i40e.h"
@@ -407,6 +408,27 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
}
/**
+ * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
+ * @ring: Tx ring to get statistics from
+ * @stats: statistics entry to be updated
+ **/
+static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 bytes, packets;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+}
+
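/* Sketch (user-space analogue): the retry loop behind
 * u64_stats_fetch_begin_irq()/..._retry_irq() used above. A writer bumps
 * a sequence counter around its update; a reader retries until it reads
 * the same even sequence before and after copying. This ignores the
 * memory barriers the kernel primitive implies and is single-threaded
 * purely for illustration.
 */
#include <stdio.h>

struct stats {
	unsigned int seq;
	unsigned long long packets, bytes;
};

static void writer_update(struct stats *s, unsigned long long p,
			  unsigned long long b)
{
	s->seq++;		/* odd: update in progress */
	s->packets += p;
	s->bytes += b;
	s->seq++;		/* even: update done */
}

static void reader_snapshot(const struct stats *s,
			    unsigned long long *p, unsigned long long *b)
{
	unsigned int start;

	do {
		start = s->seq;
		*p = s->packets;
		*b = s->bytes;
	} while (start != s->seq || (start & 1));
}

int main(void)
{
	struct stats s = { 0, 0, 0 };
	unsigned long long p, b;

	writer_update(&s, 10, 1500);
	reader_snapshot(&s, &p, &b);
	printf("%llu packets, %llu bytes\n", p, b);
	return 0;
}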
+/**
* i40e_get_netdev_stats_struct - Get statistics for netdev interface
* @netdev: network interface device structure
*
@@ -436,15 +458,8 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
+ i40e_get_netdev_stats_struct_tx(tx_ring, stats);
- do {
- start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
- packets = tx_ring->stats.packets;
- bytes = tx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
-
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
rx_ring = &tx_ring[1];
do {
@@ -455,6 +470,9 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
stats->rx_packets += packets;
stats->rx_bytes += bytes;
+
+ if (i40e_enabled_xdp_vsi(vsi))
+ i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
}
rcu_read_unlock();
@@ -2263,9 +2281,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
- if ((changed_flags & IFF_PROMISC) ||
- (promisc_changed &&
- test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))) {
+
+ if ((changed_flags & IFF_PROMISC) || promisc_changed) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
@@ -2396,6 +2413,18 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
}
/**
+ * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: the vsi
+ **/
+static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
+{
+ if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ return I40E_RXBUFFER_2048;
+ else
+ return I40E_RXBUFFER_3072;
+}
+
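/* Sketch: the MTU bound implied by the XDP check around this helper.
 * The frame adds ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 22
 * bytes of L2 overhead on top of the MTU, so a 3072-byte XDP buffer
 * caps the MTU at 3050 and a 2048-byte buffer at 2026. Standalone
 * user-space demo.
 */
#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4

static int max_mtu(int rx_buf_len)
{
	return rx_buf_len - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
}

int main(void)
{
	printf("2048 -> MTU <= %d\n", max_mtu(2048));	/* 2026 */
	printf("3072 -> MTU <= %d\n", max_mtu(3072));	/* 3050 */
	return 0;
}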
+/**
* i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2408,6 +2437,13 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if (frame_size > i40e_max_xdp_frame_size(vsi))
+ return -EINVAL;
+ }
+
netdev_info(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
@@ -2794,6 +2830,12 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return err;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
+
return err;
}
@@ -2807,12 +2849,17 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
int i;
- if (!vsi->tx_rings)
- return;
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ }
- for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
- i40e_free_tx_resources(vsi->tx_rings[i]);
+ if (vsi->xdp_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ i40e_free_tx_resources(vsi->xdp_rings[i]);
+ }
}
/**
@@ -3073,6 +3120,12 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
err = i40e_configure_tx_ring(vsi->tx_rings[i]);
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return err;
+
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
+
return err;
}
@@ -3217,6 +3270,7 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi)
**/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
+ bool has_xdp = i40e_enabled_xdp_vsi(vsi);
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u16 vector;
@@ -3247,28 +3301,40 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
/* Linked list for the queuepairs assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
+ u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
u32 val;
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX
- << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX <<
+ I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(qp), val);
+ if (has_xdp) {
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX <<
+ I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ }
+
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
- ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_RX
- << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX <<
+ I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
/* Terminate the linked list */
if (q == (q_vector->num_ringpairs - 1))
- val |= (I40E_QUEUE_END_OF_LIST
- << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ val |= (I40E_QUEUE_END_OF_LIST <<
+ I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
wr32(hw, I40E_QINT_TQCTL(qp), val);
qp++;
@@ -3322,6 +3388,7 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
**/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
+ u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
@@ -3342,12 +3409,22 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_LNKLST0, 0);
/* Associate the queue pair to the vector and enable the queue int */
- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
(I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(0), val);
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
+ (I40E_QUEUE_TYPE_TX
+ << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ }
+
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
@@ -3511,11 +3588,24 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
int base = vsi->base_vector;
int i;
+ /* disable interrupt causation from each queue */
for (i = 0; i < vsi->num_queue_pairs; i++) {
- wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
- wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
+ u32 val;
+
+ val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+ val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+ val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
+ val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ continue;
+ wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
}
+ /* disable each interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = vsi->base_vector;
i < (vsi->num_q_vectors + vsi->base_vector); i++)
@@ -3594,10 +3684,10 @@ static irqreturn_t i40e_intr(int irq, void *data)
pf->sw_int_count++;
if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
- (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+ (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
- icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
+ set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
}
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -3816,6 +3906,16 @@ static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
+ /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = xdp_ring;
+ q_vector->tx.count++;
+ }
+
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
q_vector->rx.ring = rx_ring;
@@ -3995,6 +4095,33 @@ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
+ * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
+ * @seid: VSI SEID
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @is_xdp: true if the queue is used for XDP
+ * @enable: start or stop the queue
+ **/
+static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
+ bool is_xdp, bool enable)
+{
+ int ret;
+
+ i40e_control_tx_q(pf, pf_q, enable);
+
+ /* wait for the change to finish */
+ ret = i40e_pf_txq_wait(pf, pf_q, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d %sTx ring %d %sable timeout\n",
+ seid, (is_xdp ? "XDP " : ""), pf_q,
+ (enable ? "en" : "dis"));
+ }
+
+ return ret;
+}
+
+/**
* i40e_vsi_control_tx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -4006,16 +4133,20 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- i40e_control_tx_q(pf, pf_q, enable);
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q,
+ false /*is xdp*/, enable);
+ if (ret)
+ break;
- /* wait for the change to finish */
- ret = i40e_pf_txq_wait(pf, pf_q, enable);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "VSI seid %d Tx ring %d %sable timeout\n",
- vsi->seid, pf_q, (enable ? "en" : "dis"));
+ if (!i40e_enabled_xdp_vsi(vsi))
+ continue;
+
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q + vsi->alloc_queue_pairs,
+ true /*is xdp*/, enable);
+ if (ret)
break;
- }
}
return ret;
@@ -4527,7 +4658,21 @@ int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
vsi->seid, pf_q);
return ret;
}
- /* Check and wait for the Tx queue */
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ goto wait_rx;
+
+ /* Check and wait for the XDP Tx queue */
+ ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
+ false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d XDP Tx ring %d disable timeout\n",
+ vsi->seid, pf_q);
+ return ret;
+ }
+wait_rx:
+ /* Check and wait for the Rx queue */
ret = i40e_pf_rxq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -5446,6 +5591,8 @@ void i40e_down(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
+ if (i40e_enabled_xdp_vsi(vsi))
+ i40e_clean_tx_ring(vsi->xdp_rings[i]);
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
@@ -6419,9 +6566,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
if (reset_flags &&
!test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, pf->state)) {
- rtnl_lock();
- i40e_do_reset(pf, reset_flags, true);
- rtnl_unlock();
+ i40e_do_reset(pf, reset_flags, false);
}
}
@@ -6970,6 +7115,51 @@ static void i40e_send_version(struct i40e_pf *pf)
}
/**
+ * i40e_get_oem_version - get OEM specific version information
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_get_oem_version(struct i40e_hw *hw)
+{
+ u16 block_offset = 0xffff;
+ u16 block_length = 0;
+ u16 capabilities = 0;
+ u16 gen_snap = 0;
+ u16 release = 0;
+
+#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
+#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
+#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
+#define I40E_NVM_OEM_GEN_OFFSET 0x02
+#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
+#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
+#define I40E_NVM_OEM_LENGTH 3
+
+ /* Check if pointer to OEM version block is valid. */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
+ if (block_offset == 0xffff)
+ return;
+
+ /* Check if OEM version block has correct length. */
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
+ &block_length);
+ if (block_length < I40E_NVM_OEM_LENGTH)
+ return;
+
+ /* Check if OEM version format is as expected. */
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
+ &capabilities);
+ if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
+ return;
+
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
+ &gen_snap);
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
+ &release);
+ hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
+ hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
+}
+
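/* Sketch: the three validity gates of i40e_get_oem_version() above, run
 * against a fake shadow RAM. Word 0x1B points at the OEM block; the
 * block must be at least 3 words long and its capabilities word must
 * have the low nibble clear before gen/snap/release are trusted. All
 * values here are made up; standalone user-space demo.
 */
#include <stdio.h>
#include <stdint.h>

#define OEM_PTR_WORD	0x1B

static const uint16_t sr[64] = {
	[OEM_PTR_WORD] = 0x20,	/* block lives at word 0x20 */
	[0x20] = 3,		/* length */
	[0x21] = 0x0000,	/* capabilities: low nibble clear */
	[0x22] = 0x0102,	/* gen/snap */
	[0x23] = 0x0003,	/* release */
};

int main(void)
{
	uint16_t off = sr[OEM_PTR_WORD];

	if (off == 0xffff || sr[off] < 3 || (sr[off + 1] & 0x000F))
		return 1;	/* no valid OEM block */
	printf("oem_ver = 0x%08x\n",
	       ((uint32_t)sr[off + 2] << 16) | sr[off + 3]);	/* 0x01020003 */
	return 0;
}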
+/**
* i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
* @pf: board private structure
**/
@@ -7016,6 +7206,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
+ i40e_get_oem_version(&pf->hw);
/* re-verify the eeprom if we just had an EMP reset */
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
@@ -7515,15 +7706,22 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
**/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
+ struct i40e_ring **next_rings;
int size;
int ret = 0;
- /* allocate memory for both Tx and Rx ring pointers */
- size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ /* allocate memory for the Tx, XDP Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
vsi->tx_rings = kzalloc(size, GFP_KERNEL);
if (!vsi->tx_rings)
return -ENOMEM;
- vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+ next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ vsi->xdp_rings = next_rings;
+ next_rings += vsi->alloc_queue_pairs;
+ }
+ vsi->rx_rings = next_rings;
if (alloc_qvectors) {
/* allocate memory for q_vector pointers */
@@ -7643,6 +7841,7 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
kfree(vsi->tx_rings);
vsi->tx_rings = NULL;
vsi->rx_rings = NULL;
+ vsi->xdp_rings = NULL;
}
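/* Sketch: carving one pointer allocation into tx/xdp/rx views, as
 * i40e_vsi_alloc_arrays() above does with its single kzalloc(). Freeing
 * the base (tx) pointer releases all three views at once, which is why
 * i40e_vsi_free_arrays() only frees tx_rings and NULLs the rest.
 * Standalone user-space demo with illustrative sizes.
 */
#include <stdio.h>
#include <stdlib.h>

struct ring;	/* opaque for the sketch */

int main(void)
{
	unsigned int qp = 4;
	int xdp = 1;
	struct ring **tx, **xdpr = NULL, **rx, **next;

	tx = calloc(qp * (xdp ? 3 : 2), sizeof(*tx));
	if (!tx)
		return 1;

	next = tx + qp;
	if (xdp) {
		xdpr = next;
		next += qp;
	}
	rx = next;

	printf("tx=%p xdp=%p rx=%p\n", (void *)tx, (void *)xdpr,
	       (void *)rx);
	free(tx);	/* one free covers all three views */
	return 0;
}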
/**
@@ -7726,6 +7925,8 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
vsi->rx_rings[i] = NULL;
+ if (vsi->xdp_rings)
+ vsi->xdp_rings[i] = NULL;
}
}
}
@@ -7736,43 +7937,61 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
- struct i40e_ring *tx_ring, *rx_ring;
+ int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
struct i40e_pf *pf = vsi->back;
- int i;
+ struct i40e_ring *ring;
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
/* allocate space for Tx, XDP Tx and Rx rings in one shot */
- tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
- if (!tx_ring)
+ ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!ring)
goto err_out;
- tx_ring->queue_index = i;
- tx_ring->reg_idx = vsi->base_queue + i;
- tx_ring->ring_active = false;
- tx_ring->vsi = vsi;
- tx_ring->netdev = vsi->netdev;
- tx_ring->dev = &pf->pdev->dev;
- tx_ring->count = vsi->num_desc;
- tx_ring->size = 0;
- tx_ring->dcb_tc = 0;
+ ring->queue_index = i;
+ ring->reg_idx = vsi->base_queue + i;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
- tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
- tx_ring->tx_itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = tx_ring;
-
- rx_ring = &tx_ring[1];
- rx_ring->queue_index = i;
- rx_ring->reg_idx = vsi->base_queue + i;
- rx_ring->ring_active = false;
- rx_ring->vsi = vsi;
- rx_ring->netdev = vsi->netdev;
- rx_ring->dev = &pf->pdev->dev;
- rx_ring->count = vsi->num_desc;
- rx_ring->size = 0;
- rx_ring->dcb_tc = 0;
- rx_ring->rx_itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = rx_ring;
+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ ring->tx_itr_setting = pf->tx_itr_default;
+ vsi->tx_rings[i] = ring++;
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ goto setup_rx;
+
+ ring->queue_index = vsi->alloc_queue_pairs + i;
+ ring->reg_idx = vsi->base_queue + ring->queue_index;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = NULL;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ set_ring_xdp(ring);
+ ring->tx_itr_setting = pf->tx_itr_default;
+ vsi->xdp_rings[i] = ring++;
+
+setup_rx:
+ ring->queue_index = i;
+ ring->reg_idx = vsi->base_queue + i;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ ring->rx_itr_setting = pf->rx_itr_default;
+ vsi->rx_rings[i] = ring;
}
return 0;
@@ -8574,10 +8793,10 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
}
/**
- * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
+ * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
i40e_status status;
bool min_valid, max_valid;
@@ -8588,27 +8807,27 @@ i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
if (!status) {
if (min_valid)
- pf->npar_min_bw = min_bw;
+ pf->min_bw = min_bw;
if (max_valid)
- pf->npar_max_bw = max_bw;
+ pf->max_bw = max_bw;
}
return status;
}
/**
- * i40e_set_npar_bw_setting - Set BW settings for this PF partition
+ * i40e_set_partition_bw_setting - Set BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
struct i40e_aqc_configure_partition_bw_data bw_data;
i40e_status status;
/* Set the valid bit for this PF */
bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
- bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
- bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
/* Set the new bandwidths */
status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
@@ -8617,10 +8836,10 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
}
/**
- * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
+ * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
/* Commit temporary BW setting to permanent NVM image */
enum i40e_admin_queue_err last_aq_status;
@@ -8739,16 +8958,19 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
pf->flags |= I40E_FLAG_MFP_ENABLED;
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
- if (i40e_get_npar_bw_setting(pf))
+ if (i40e_get_partition_bw_setting(pf)) {
dev_warn(&pf->pdev->dev,
- "Could not get NPAR bw settings\n");
- else
+ "Could not get partition bw settings\n");
+ } else {
dev_info(&pf->pdev->dev,
- "Min BW = %8.8x, Max BW = %8.8x\n",
- pf->npar_min_bw, pf->npar_max_bw);
+ "Partition BW Min = %8.8x, Max = %8.8x\n",
+ pf->min_bw, pf->max_bw);
+
+ /* nudge the Tx scheduler */
+ i40e_set_partition_bw_setting(pf);
+ }
}
- /* FW/NVM is not yet fixed in this regard */
if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
@@ -8851,10 +9073,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
mutex_init(&pf->switch_mutex);
- /* If NPAR is enabled nudge the Tx scheduler */
- if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
- i40e_set_npar_bw_setting(pf);
-
sw_init_done:
return err;
}
@@ -9311,6 +9529,72 @@ out_err:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
+/**
+ * i40e_xdp_setup - add/remove an XDP program
+ * @vsi: VSI to be changed
+ * @prog: XDP program
+ **/
+static int i40e_xdp_setup(struct i40e_vsi *vsi,
+ struct bpf_prog *prog)
+{
+ int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct i40e_pf *pf = vsi->back;
+ struct bpf_prog *old_prog;
+ bool need_reset;
+ int i;
+
+ /* Don't allow frames that span over multiple buffers */
+ if (frame_size > vsi->rx_buf_len)
+ return -EINVAL;
+
+ if (!i40e_enabled_xdp_vsi(vsi) && !prog)
+ return 0;
+
+ /* When turning XDP on->off/off->on we reset and rebuild the rings. */
+ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
+
+ if (need_reset)
+ i40e_prep_for_reset(pf, true);
+
+ old_prog = xchg(&vsi->xdp_prog, prog);
+
+ if (need_reset)
+ i40e_reset_and_rebuild(pf, true, true);
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+/**
+ * i40e_xdp - implements ndo_xdp for i40e
+ * @dev: netdevice
+ * @xdp: XDP command
+ **/
+static int i40e_xdp(struct net_device *dev,
+ struct netdev_xdp *xdp)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return i40e_xdp_setup(vsi, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
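/* Sketch: the need_reset rule in i40e_xdp_setup() above - the rings are
 * torn down and rebuilt only when XDP toggles on or off, while replacing
 * one program with another is done in place via xchg() plus per-ring
 * WRITE_ONCE() propagation. Standalone user-space demo of the rule.
 */
#include <stdio.h>
#include <stdbool.h>

static bool need_reset(bool had_prog, bool has_prog)
{
	return had_prog != has_prog;
}

int main(void)
{
	printf("off->on : %d\n", need_reset(false, true));	/* 1 */
	printf("on->on  : %d\n", need_reset(true, true));	/* 0: swap in place */
	printf("on->off : %d\n", need_reset(true, false));	/* 1 */
	return 0;
}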
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -9343,6 +9627,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_features_check = i40e_features_check,
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
+ .ndo_xdp = i40e_xdp,
};
/**
@@ -9911,6 +10196,7 @@ vector_setup_out:
**/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
+ u16 alloc_queue_pairs;
struct i40e_pf *pf;
u8 enabled_tc;
int ret;
@@ -9929,11 +10215,14 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
if (ret)
goto err_vsi;
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+
+ ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
"failed to get tracking for %d queues for VSI %d err %d\n",
- vsi->alloc_queue_pairs, vsi->seid, ret);
+ alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -9989,6 +10278,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
{
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb = NULL;
+ u16 alloc_queue_pairs;
int ret, i;
int v_idx;
@@ -10076,12 +10366,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
else if (type == I40E_VSI_SRIOV)
vsi->vf_id = param1;
/* assign it some queues */
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
- vsi->idx);
+ alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+
+ ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
"failed to get tracking for %d queues for VSI %d err=%d\n",
- vsi->alloc_queue_pairs, vsi->seid, ret);
+ alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -11099,6 +11391,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ i40e_get_oem_version(hw);
/* provide nvm, fw, api versions */
dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
@@ -11611,11 +11904,8 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
}
/* shutdown all operations */
- if (!test_bit(__I40E_SUSPENDED, pf->state)) {
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
- }
+ if (!test_bit(__I40E_SUSPENDED, pf->state))
+ i40e_prep_for_reset(pf, false);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -11681,9 +11971,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
if (test_bit(__I40E_SUSPENDED, pf->state))
return;
- rtnl_lock();
- i40e_handle_reset_warning(pf, true);
- rtnl_unlock();
+ i40e_handle_reset_warning(pf, false);
}
/**
@@ -11763,9 +12051,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
+ i40e_prep_for_reset(pf, false);
wr32(hw, I40E_PFPM_APM,
(pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11797,9 +12083,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
+ i40e_prep_for_reset(pf, false);
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
@@ -11845,9 +12129,7 @@ static int i40e_resume(struct pci_dev *pdev)
/* handling the reset will rebuild the device state */
if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
clear_bit(__I40E_DOWN, pf->state);
- rtnl_lock();
- i40e_reset_and_rebuild(pf, false, true);
- rtnl_unlock();
+ i40e_reset_and_rebuild(pf, false, false);
}
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index af554f3cda19..b936febc315a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -26,6 +26,7 @@
#include <linux/prefetch.h>
#include <net/busy_poll.h>
+#include <linux/bpf_trace.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
@@ -629,6 +630,8 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
if (tx_buffer->skb) {
if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf);
+ else if (ring_is_xdp(ring))
+ page_frag_free(tx_buffer->raw_buf);
else
dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
@@ -770,8 +773,11 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
total_bytes += tx_buf->bytecount;
total_packets += tx_buf->gso_segs;
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ /* free the skb/XDP data */
+ if (ring_is_xdp(tx_ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -847,6 +853,9 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
/* notify netdev of completed buffers */
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
@@ -1195,6 +1204,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
i40e_clean_rx_ring(rx_ring);
+ rx_ring->xdp_prog = NULL;
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
@@ -1241,6 +1251,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+
return 0;
err:
kfree(rx_ring->rx_bi);
@@ -1593,6 +1605,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
* i40e_cleanup_headers - Correct empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being fixed
+ * @rx_desc: pointer to the EOP Rx descriptor
*
* Also address the case where we are pulling data in on pages only
* and as such no data is present in the skb header.
@@ -1602,8 +1615,25 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
*
* Returns true if an error was encountered and skb was freed.
**/
-static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
+ union i40e_rx_desc *rx_desc)
+
{
+ /* XDP packets use error pointer so abort at this point */
+ if (IS_ERR(skb))
+ return true;
+
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc,
+ BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -1776,7 +1806,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
* i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
- * @size: size of buffer to add to skb
+ * @xdp: xdp_buff pointing to the data
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
@@ -1784,9 +1814,9 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
@@ -1796,9 +1826,9 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(xdp->data + L1_CACHE_BYTES);
#endif
/* allocate a skb to store the frags */
@@ -1811,10 +1841,11 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > I40E_RX_HDR_SIZE)
- headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+ headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data,
+ ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
size -= headlen;
@@ -1841,16 +1872,16 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buffer: Rx buffer to pull data from
- * @size: size of buffer to add to skb
+ * @xdp: xdp_buff pointing to the data
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
@@ -1860,12 +1891,12 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(xdp->data + L1_CACHE_BYTES);
#endif
/* build an skb around the page buffer */
- skb = build_skb(va - I40E_SKB_PAD, truesize);
+ skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
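Both skb-construction paths above now derive size from xdp->data_end - xdp->data
instead of taking it as a parameter, because an XDP program may move the start
of the frame before the driver builds the skb. A hedged illustration, written
as an XDP program fragment (the 4-byte adjustment is invented for the example):

	/* in an attached XDP program: strip a hypothetical 4-byte tag */
	if (bpf_xdp_adjust_head(ctx, 4))
		return XDP_DROP;
	/* on XDP_PASS the driver now sees a smaller data_end - data */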
@@ -1944,6 +1975,75 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
return true;
}
+#define I40E_XDP_PASS 0
+#define I40E_XDP_CONSUMED 1
+#define I40E_XDP_TX 2
+
+static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
+ struct i40e_ring *xdp_ring);
+
+/**
+ * i40e_run_xdp - run an XDP program
+ * @rx_ring: Rx ring being processed
+ * @xdp: XDP buffer containing the frame
+ **/
+static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int result = I40E_XDP_PASS;
+ struct i40e_ring *xdp_ring;
+ struct bpf_prog *xdp_prog;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ if (!xdp_prog)
+ goto xdp_out;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ result = i40e_xmit_xdp_ring(xdp, xdp_ring);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping packet */
+ case XDP_DROP:
+ result = I40E_XDP_CONSUMED;
+ break;
+ }
+xdp_out:
+ rcu_read_unlock();
+ return ERR_PTR(-result);
+}
+
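i40e_run_xdp() folds the verdict into the returned skb pointer:
ERR_PTR(-I40E_XDP_PASS) is ERR_PTR(0), i.e. NULL, so the normal skb build path
still runs, while consumed or transmitted frames come back as small negative
error pointers. A minimal sketch of the decode side, assuming the I40E_XDP_*
constants above (illustrative helper, not part of the patch):

	#include <linux/err.h>

	static bool i40e_xdp_frame_was_tx(struct sk_buff *skb)
	{
		/* a real error pointer encodes I40E_XDP_CONSUMED or
		 * I40E_XDP_TX; anything else is a frame for the stack
		 */
		return IS_ERR(skb) && PTR_ERR(skb) == -I40E_XDP_TX;
	}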
+/**
+ * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
+ * @rx_ring: Rx ring
+ * @rx_buffer: Rx buffer to adjust
+ * @size: Size of adjustment
+ **/
+static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+
+ rx_buffer->page_offset ^= truesize;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
+
+ rx_buffer->page_offset += truesize;
+#endif
+}
+
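With 4 KiB pages truesize is half a page, so the XOR branch above simply
ping-pongs page_offset between the two halves: the half just handed to the XDP
Tx ring stays in flight while the other half receives the next frame. A worked
trace under that assumption:

	/* PAGE_SIZE == 4096, truesize == 2048 */
	rx_buffer->page_offset = 0;
	rx_buffer->page_offset ^= 2048;	/* -> 2048: second half armed */
	rx_buffer->page_offset ^= 2048;	/* -> 0: first half reusable  */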
/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
@@ -1961,11 +2061,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- bool failure = false;
+ bool failure = false, xdp_xmit = false;
while (likely(total_rx_packets < budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ struct xdp_buff xdp;
unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
@@ -2006,12 +2107,32 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_buffer = i40e_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */
- if (skb)
+ if (!skb) {
+ xdp.data = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+ xdp.data_hard_start = xdp.data -
+ i40e_rx_offset(rx_ring);
+ xdp.data_end = xdp.data + size;
+
+ skb = i40e_run_xdp(rx_ring, &xdp);
+ }
+
+ if (IS_ERR(skb)) {
+ if (PTR_ERR(skb) == -I40E_XDP_TX) {
+ xdp_xmit = true;
+ i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+ total_rx_bytes += size;
+ total_rx_packets++;
+ } else if (skb) {
i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
- else if (ring_uses_build_skb(rx_ring))
- skb = i40e_build_skb(rx_ring, rx_buffer, size);
- else
- skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+ } else if (ring_uses_build_skb(rx_ring)) {
+ skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
+ } else {
+ skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
+ }
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -2026,18 +2147,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- /* ERR_MASK will only have valid bits if EOP set, and
- * what we are doing here is actually checking
- * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
- * the error field
- */
- if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
- continue;
- }
-
- if (i40e_cleanup_headers(rx_ring, skb)) {
+ if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
skb = NULL;
continue;
}
@@ -2063,6 +2173,19 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
total_rx_packets++;
}
+ if (xdp_xmit) {
+ struct i40e_ring *xdp_ring;
+
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+
+ writel(xdp_ring->next_to_use, xdp_ring->tail);
+ }
+
rx_ring->skb = skb;
u64_stats_update_begin(&rx_ring->syncp);
@@ -3120,6 +3243,59 @@ dma_error:
}
/**
+ * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
+ * @xdp: data to transmit
+ * @xdp_ring: XDP Tx ring
+ **/
+static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
+ struct i40e_ring *xdp_ring)
+{
+ u32 size = xdp->data_end - xdp->data;
+ u16 i = xdp_ring->next_to_use;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ dma_addr_t dma;
+
+ if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ return I40E_XDP_CONSUMED;
+ }
+
+ dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ return I40E_XDP_CONSUMED;
+
+ tx_bi = &xdp_ring->tx_bi[i];
+ tx_bi->bytecount = size;
+ tx_bi->gso_segs = 1;
+ tx_bi->raw_buf = xdp->data;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc = I40E_TX_DESC(xdp_ring, i);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
+ | I40E_TXD_CMD,
+ 0, size, 0);
+
+ /* Make certain all of the status bits have been updated
+ * before next_to_watch is written.
+ */
+ smp_wmb();
+
+ i++;
+ if (i == xdp_ring->count)
+ i = 0;
+
+ tx_bi->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = i;
+
+ return I40E_XDP_TX;
+}
+
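Note that i40e_xmit_xdp_ring() stops at updating next_to_watch and
next_to_use; it never writes the tail register. The doorbell is batched once
per NAPI poll in i40e_clean_rx_irq() above:

	if (xdp_xmit) {
		wmb();	/* descriptor writes must land before the bump */
		writel(xdp_ring->next_to_use, xdp_ring->tail);
	}

so a burst of XDP_TX frames costs a single MMIO write instead of one per frame.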
+/**
* i40e_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer
* @tx_ring: ring to send buffer on
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index f5de51124cae..b288d58313a6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -360,6 +360,7 @@ struct i40e_ring {
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
+ struct bpf_prog *xdp_prog;
union {
struct i40e_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi;
@@ -395,6 +396,7 @@ struct i40e_ring {
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define I40E_TXR_FLAGS_XDP BIT(2)
/* stats structs */
struct i40e_queue_stats stats;
@@ -437,6 +439,16 @@ static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}
+static inline bool ring_is_xdp(struct i40e_ring *ring)
+{
+ return !!(ring->flags & I40E_TXR_FLAGS_XDP);
+}
+
+static inline void set_ring_xdp(struct i40e_ring *ring)
+{
+ ring->flags |= I40E_TXR_FLAGS_XDP;
+}
+
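A hedged usage sketch for the new flag helpers (the setup function and the
NULL-netdev convention are assumptions for illustration, not lines from this
patch):

	static void i40e_mark_xdp_ring(struct i40e_ring *ring)
	{
		set_ring_xdp(ring);		/* tag the Tx ring as XDP-owned */
		if (ring_is_xdp(ring))
			ring->netdev = NULL;	/* no stack Tx queue behind it */
	}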
enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 91d8786d386d..83e63e55c4b4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -528,7 +528,7 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -586,6 +586,7 @@ struct i40e_aqc_set_wol_filter {
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 3a3ca965b242..7c213a347909 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1198,6 +1198,7 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
{
if (!adapter->vsi_res)
return;
+ adapter->num_active_queues = 0;
kfree(adapter->tx_rings);
adapter->tx_rings = NULL;
kfree(adapter->rx_rings);
@@ -1214,18 +1215,22 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
**/
static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
- int i;
+ int i, num_active_queues;
+
+ num_active_queues = min_t(int,
+ adapter->vsi_res->num_queue_pairs,
+ (int)(num_online_cpus()));
- adapter->tx_rings = kcalloc(adapter->num_active_queues,
+ adapter->tx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!adapter->tx_rings)
goto err_out;
- adapter->rx_rings = kcalloc(adapter->num_active_queues,
+ adapter->rx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!adapter->rx_rings)
goto err_out;
- for (i = 0; i < adapter->num_active_queues; i++) {
+ for (i = 0; i < num_active_queues; i++) {
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
@@ -1247,6 +1252,8 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
}
+ adapter->num_active_queues = num_active_queues;
+
return 0;
err_out:
@@ -2636,9 +2643,6 @@ static void i40evf_init_task(struct work_struct *work)
adapter->watchdog_timer.data = (unsigned long)adapter;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
- adapter->num_active_queues = min_t(int,
- adapter->vsi_res->num_queue_pairs,
- (int)(num_online_cpus()));
adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
err = i40evf_init_interrupt_scheme(adapter);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index ca4b55c60682..48d21c1e09f2 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3917,17 +3917,6 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
return data;
}
-/* Set pool number in a BM cookie */
-static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
-{
- u32 bm;
-
- bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
- bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
-
- return bm;
-}
-
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
dma_addr_t buf_dma_addr,
@@ -3964,14 +3953,6 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
put_cpu();
}
-/* Refill BM pool */
-static void mvpp2_pool_refill(struct mvpp2_port *port, int pool,
- dma_addr_t dma_addr,
- phys_addr_t phys_addr)
-{
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
-}
-
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int buf_num)
@@ -4162,7 +4143,10 @@ static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}
-/* Mask the current CPU's Rx/Tx interrupts */
+/* Mask the current CPU's Rx/Tx interrupts.
+ * Called by on_each_cpu(), which guarantees it runs with migration
+ * disabled, so using smp_processor_id() is OK.
+ */
static void mvpp2_interrupts_mask(void *arg)
{
struct mvpp2_port *port = arg;
@@ -4171,7 +4155,10 @@ static void mvpp2_interrupts_mask(void *arg)
MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}
-/* Unmask the current CPU's Rx/Tx interrupts */
+/* Unmask the current CPU's Rx/Tx interrupts.
+ * Called by on_each_cpu(), which guarantees it runs with migration
+ * disabled, so using smp_processor_id() is OK.
+ */
static void mvpp2_interrupts_unmask(void *arg)
{
struct mvpp2_port *port = arg;
@@ -4554,7 +4541,11 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
return txq->descs + tx_desc;
}
-/* Update HW with number of aggregated Tx descriptors to be sent */
+/* Update HW with number of aggregated Tx descriptors to be sent
+ *
+ * Called only from mvpp2_tx(), so migration is disabled and using
+ * smp_processor_id() is OK.
+ */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
/* aggregated access - relevant TXQ number is written in TX desc */
@@ -4565,6 +4556,9 @@ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
/* Check if there are enough free descriptors in aggregated txq.
* If not, update the number of occupied descriptors and repeat the check.
+ *
+ * Called only from mvpp2_tx(), so migration is disabled and using
+ * smp_processor_id() is OK.
*/
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
struct mvpp2_tx_queue *aggr_txq, int num)
@@ -4583,7 +4577,12 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
return 0;
}
-/* Reserved Tx descriptors allocation request */
+/* Reserved Tx descriptors allocation request
+ *
+ * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
+ * only by mvpp2_tx(), so migration is disabled and using
+ * smp_processor_id() is OK.
+ */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
struct mvpp2_tx_queue *txq, int num)
{
@@ -4687,6 +4686,10 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
/* Get number of sent descriptors and decrement counter.
* The number of sent descriptors is returned.
* Per-CPU access
+ *
+ * Called only from mvpp2_txq_done(), which in turn runs from mvpp2_tx()
+ * (migration disabled) and from the TX completion tasklet (migration
+ * disabled), so using smp_processor_id() is OK.
*/
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
@@ -4701,6 +4704,9 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
MVPP2_TRANSMITTED_COUNT_OFFSET;
}
+/* Called through on_each_cpu(), so it runs on all CPUs with migration
+ * disabled; therefore using smp_processor_id() is OK.
+ */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
struct mvpp2_port *port = arg;
@@ -5001,7 +5007,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
MVPP2_RXD_BM_POOL_ID_OFFS;
- mvpp2_pool_refill(port, pool,
+ mvpp2_bm_pool_put(port, pool,
mvpp2_rxdesc_dma_addr_get(port, rx_desc),
mvpp2_rxdesc_cookie_get(port, rx_desc));
}
@@ -5455,7 +5461,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
if (!buf)
return -ENOMEM;
- mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
return 0;
}
@@ -5539,7 +5545,7 @@ err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
- mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
continue;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 962975d192d1..adaaafc20532 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -221,6 +221,9 @@ static void mtk_phy_link_adjust(struct net_device *dev)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
+
+ if (!of_phy_is_fixed_link(mac->of_node))
+ phy_print_status(dev->phydev);
}
static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
@@ -369,28 +372,48 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
mdiobus_unregister(eth->mii_bus);
}
-static inline void mtk_irq_disable(struct mtk_eth *eth,
- unsigned reg, u32 mask)
+static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->tx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&eth->irq_lock, flags);
- val = mtk_r32(eth, reg);
- mtk_w32(eth, val & ~mask, reg);
- spin_unlock_irqrestore(&eth->irq_lock, flags);
+ spin_lock_irqsave(&eth->tx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
-static inline void mtk_irq_enable(struct mtk_eth *eth,
- unsigned reg, u32 mask)
+static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&eth->irq_lock, flags);
- val = mtk_r32(eth, reg);
- mtk_w32(eth, val | mask, reg);
- spin_unlock_irqrestore(&eth->irq_lock, flags);
+ spin_lock_irqsave(&eth->rx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+ mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->rx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+ mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
static int mtk_set_mac_address(struct net_device *dev, void *p)
@@ -969,6 +992,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
RX_DMA_VID(trxd.rxd3))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
RX_DMA_VID(trxd.rxd3));
+ skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
ring->data[idx] = new_data;
@@ -1098,7 +1122,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
return budget;
napi_complete(napi);
- mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
return tx_done;
}
@@ -1132,7 +1156,7 @@ poll_again:
goto poll_again;
}
napi_complete(napi);
- mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
return rx_done + budget - remain_budget;
}
@@ -1667,7 +1691,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
}
return IRQ_HANDLED;
@@ -1679,7 +1703,7 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
if (likely(napi_schedule_prep(&eth->tx_napi))) {
__napi_schedule(&eth->tx_napi);
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
}
return IRQ_HANDLED;
@@ -1691,11 +1715,11 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
mtk_handle_irq_rx(eth->irq[2], dev);
- mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif
@@ -1736,8 +1760,8 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
- mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
atomic_inc(&eth->dma_refcnt);
@@ -1782,8 +1806,8 @@ static int mtk_stop(struct net_device *dev)
if (!atomic_dec_and_test(&eth->dma_refcnt))
return 0;
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -1858,11 +1882,13 @@ static int mtk_hw_init(struct mtk_eth *eth)
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+ /* enable interrupt delay for RX */
+ mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
@@ -1933,8 +1959,8 @@ static void mtk_uninit(struct net_device *dev)
phy_disconnect(dev->phydev);
if (of_phy_is_fixed_link(mac->of_node))
of_phy_deregister_fixed_link(mac->of_node);
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2394,7 +2420,8 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->base);
spin_lock_init(&eth->page_lock);
- spin_lock_init(&eth->irq_lock);
+ spin_lock_init(&eth->tx_irq_lock);
+ spin_lock_init(&eth->rx_irq_lock);
eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,ethsys");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 3c46a3b613b9..5868a09f623a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -125,7 +125,14 @@
#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
/* PDMA Delay Interrupt Register */
-#define MTK_PDMA_DELAY_INT 0xa0c
+#define MTK_PDMA_DELAY_INT 0xa0c
+#define MTK_PDMA_DELAY_RX_EN BIT(15)
+#define MTK_PDMA_DELAY_RX_PINT 4
+#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
+#define MTK_PDMA_DELAY_RX_PTIME 4
+#define MTK_PDMA_DELAY_RX_DELAY \
+ (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
+ (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
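Expanding the macro gives the value mtk_hw_init() now programs; a worked check
of the fields rather than new code:

	/* BIT(15) | 4 | (4 << 8) == 0x8404 */
	mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

i.e. RX delay interrupts enabled, firing after 4 pending packets or 4 timer
units (the unit is hardware-defined), whichever comes first. MTK_RX_DONE_INT
is redefined below to the delayed-interrupt bit, so NAPI is driven by this
coalesced source.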
/* PDMA Interrupt Status Register */
#define MTK_PDMA_INT_STATUS 0xa20
@@ -206,6 +213,7 @@
/* QDMA Interrupt Status Register */
#define MTK_QMTK_INT_STATUS 0x1A18
+#define MTK_RX_DONE_DLY BIT(30)
#define MTK_RX_DONE_INT3 BIT(19)
#define MTK_RX_DONE_INT2 BIT(18)
#define MTK_RX_DONE_INT1 BIT(17)
@@ -214,8 +222,7 @@
#define MTK_TX_DONE_INT2 BIT(2)
#define MTK_TX_DONE_INT1 BIT(1)
#define MTK_TX_DONE_INT0 BIT(0)
-#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1 | \
- MTK_RX_DONE_INT2 | MTK_RX_DONE_INT3)
+#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
@@ -512,6 +519,8 @@ struct mtk_rx_ring {
* @dev: The device pointer
* @base: The mapped register i/o base
* @page_lock: Make sure that register operations are atomic
+ * @tx_irq_lock: Make sure that TX IRQ register operations are atomic
+ * @rx_irq_lock: Make sure that RX IRQ register operations are atomic
* @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
* dummy for NAPI to work
* @netdev: The netdev instances
@@ -540,7 +549,8 @@ struct mtk_eth {
struct device *dev;
void __iomem *base;
spinlock_t page_lock;
- spinlock_t irq_lock;
+ spinlock_t tx_irq_lock;
+ spinlock_t rx_irq_lock;
struct net_device dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index c1af47e45d3f..674773b28b2e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -3280,7 +3280,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
mlx4_dbg(dev,
- "updating vf %d port %d no link state HW enforcment\n",
+ "updating vf %d port %d no link state HW enforcement\n",
vf, port);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 1dae8e40fb25..5f41dc92aa68 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -238,7 +238,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
}
- if (mlx4_en_setup_tc(dev, num_tcs))
+ if (mlx4_en_alloc_tx_queue_per_tc(dev, num_tcs))
return 1;
return 0;
@@ -303,7 +303,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
int has_ets_tc = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
+ if (ets->prio_tc[i] >= MLX4_EN_NUM_UP_HIGH) {
en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
i, ets->prio_tc[i]);
return -EINVAL;
@@ -472,7 +472,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
goto err;
if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
goto err;
- if (mlx4_en_setup_tc(dev, 0))
+ if (mlx4_en_alloc_tx_queue_per_tc(dev, 0))
goto err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index e97fbf327594..c751a1d434ad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1750,7 +1750,8 @@ static void mlx4_en_get_channels(struct net_device *dev,
channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
channel->rx_count = priv->rx_ring_num;
- channel->tx_count = priv->tx_ring_num[TX] / MLX4_EN_NUM_UP;
+ channel->tx_count = priv->tx_ring_num[TX] /
+ priv->prof->num_up;
}
static int mlx4_en_set_channels(struct net_device *dev,
@@ -1763,6 +1764,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
int port_up = 0;
int xdp_count;
int err = 0;
+ u8 up;
if (!channel->tx_count || !channel->rx_count)
return -EINVAL;
@@ -1773,18 +1775,19 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
- if (channel->tx_count * MLX4_EN_NUM_UP + xdp_count > MAX_TX_RINGS) {
+ if (channel->tx_count * priv->prof->num_up + xdp_count >
+ MAX_TX_RINGS) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
- channel->tx_count * MLX4_EN_NUM_UP + xdp_count,
+ channel->tx_count * priv->prof->num_up + xdp_count,
MAX_TX_RINGS);
goto out;
}
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.num_tx_rings_p_up = channel->tx_count;
- new_prof.tx_ring_num[TX] = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.tx_ring_num[TX] = channel->tx_count * priv->prof->num_up;
new_prof.tx_ring_num[TX_XDP] = xdp_count;
new_prof.rx_ring_num = channel->rx_count;
@@ -1799,11 +1802,11 @@ static int mlx4_en_set_channels(struct net_device *dev,
mlx4_en_safe_replace_resources(priv, tmp);
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
- if (netdev_get_num_tc(dev))
- mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+ up = (priv->prof->num_up == MLX4_EN_NUM_UP_LOW) ?
+ 0 : priv->prof->num_up;
+ mlx4_en_setup_tc(dev, up);
en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 56cdf38d150e..2b0cbca4beb5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -169,8 +169,10 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ppp = pfctx;
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+ params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
+ params->prof[i].num_tx_rings_p_up = params->num_tx_rings_p_up;
params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up *
- MLX4_EN_NUM_UP;
+ params->prof[i].num_up;
params->prof[i].rss_rings = 0;
params->prof[i].inline_thold = inline_thold;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 18252a79a074..3a291fc1780a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -60,11 +60,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
int i;
unsigned int offset = 0;
- if (up && up != MLX4_EN_NUM_UP)
+ if (up && up != MLX4_EN_NUM_UP_HIGH)
return -EINVAL;
netdev_set_num_tc(dev, up);
-
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
/* Partition Tx queues evenly amongst UP's */
for (i = 0; i < up; i++) {
netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
@@ -86,6 +86,50 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
return 0;
}
+int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
+ int port_up = 0;
+ int err = 0;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
+ MLX4_EN_NUM_UP_HIGH;
+ new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
+ new_prof.num_up;
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
+ if (err)
+ goto out;
+
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
+ }
+
+ mlx4_en_safe_replace_resources(priv, tmp);
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err) {
+ en_err(priv, "Failed starting port for setup TC\n");
+ goto out;
+ }
+ }
+
+ err = mlx4_en_setup_tc(dev, tc);
+out:
+ mutex_unlock(&mdev->state_lock);
+ kfree(tmp);
+ return err;
+}
+
static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle,
u32 chain_index, __be16 proto,
struct tc_to_netdev *tc)
@@ -93,9 +137,12 @@ static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
+ if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
+ return -EINVAL;
+
tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- return mlx4_en_setup_tc(dev, tc->mqprio->num_tc);
+ return mlx4_en_alloc_tx_queue_per_tc(dev, tc->mqprio->num_tc);
}
#ifdef CONFIG_RFS_ACCEL
@@ -1352,7 +1399,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
- en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
+ en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
/* Setup cq moderation params */
@@ -2144,7 +2191,7 @@ static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
sizeof(dst->hwtstamp_config));
- dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+ dst->num_tx_rings_p_up = prof->num_tx_rings_p_up;
dst->rx_ring_num = prof->rx_ring_num;
dst->flags = prof->flags;
dst->mdev = src->mdev;
@@ -2197,6 +2244,7 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
dst->tx_ring[t] = src->tx_ring[t];
dst->tx_cq[t] = src->tx_cq[t];
}
+ dst->num_tx_rings_p_up = src->num_tx_rings_p_up;
dst->rx_ring_num = src->rx_ring_num;
memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}
@@ -2780,7 +2828,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
tx_changed = 1;
new_prof.tx_ring_num[TX] =
- MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP);
+ MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up);
en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
}
@@ -3271,7 +3319,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->flags |= MLX4_EN_DCB_ENABLED;
priv->cee_config.pfc_state = false;
- for (i = 0; i < MLX4_EN_NUM_UP; i++)
+ for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++)
priv->cee_config.dcb_pfc[i] = pfc_disabled;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index a6b0db0e0383..86d2d42d658d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -63,7 +63,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->local_qpn = cpu_to_be32(qpn);
context->pri_path.ackto = 1 & 0x07;
context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
- if (user_prio >= 0) {
+ /* force user priority per tx ring */
+ if (user_prio >= 0 && priv->prof->num_up == MLX4_EN_NUM_UP_HIGH) {
context->pri_path.sched_queue |= user_prio << 3;
context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 7d69d939ee2d..4f3a9b27ce4a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -691,15 +691,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
- u8 up = 0;
if (netdev_get_num_tc(dev))
return skb_tx_hash(dev, skb);
- if (skb_vlan_tag_present(skb))
- up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
-
- return fallback(dev, skb) % rings_p_up + up * rings_p_up;
+ return fallback(dev, skb) % rings_p_up;
}
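A worked example of why the VLAN-priority offset had to go (numbers assumed
for illustration): with num_up defaulting to MLX4_EN_NUM_UP_LOW == 1, only
rings_p_up Tx rings exist when no TCs are configured, so the old formula could
index past the ring array:

	/* rings_p_up == 8, VLAN priority up == 3 */
	u16 old = hash % 8 + 3 * 8;	/* 24..31: rings that no longer exist */
	u16 new = hash % 8;		/* always in range */

When TCs are enabled, skb_tx_hash() already applies the per-TC queue offsets
registered via netdev_set_tc_queue().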
static void mlx4_bf_copy(void __iomem *dst, const void *src,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 963b77d51b48..d350b2158104 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -115,11 +115,12 @@
#define MLX4_EN_SMALL_PKT_SIZE 64
#define MLX4_EN_MIN_TX_RING_P_UP 1
#define MLX4_EN_MAX_TX_RING_P_UP 32
-#define MLX4_EN_NUM_UP 8
+#define MLX4_EN_NUM_UP_LOW 1
+#define MLX4_EN_NUM_UP_HIGH 8
#define MLX4_EN_DEF_RX_RING_SIZE 1024
#define MLX4_EN_DEF_TX_RING_SIZE MLX4_EN_DEF_RX_RING_SIZE
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
- MLX4_EN_NUM_UP)
+ MLX4_EN_NUM_UP_HIGH)
#define MLX4_EN_DEFAULT_TX_WORK 256
@@ -386,6 +387,7 @@ struct mlx4_en_port_profile {
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
+ u8 num_up;
int rss_rings;
int inline_thold;
struct hwtstamp_config hwtstamp_config;
@@ -485,7 +487,7 @@ enum dcb_pfc_type {
struct mlx4_en_cee_config {
bool pfc_state;
- enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
+ enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP_HIGH];
};
#endif
@@ -761,6 +763,7 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
#endif
int mlx4_en_setup_tc(struct net_device *dev, u8 up);
+int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc);
#ifdef CONFIG_RFS_ACCEL
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index cf1ef48bfd8d..5aee05992f27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -11,9 +11,13 @@ config MLX5_CORE
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
+config MLX5_ACCEL
+ bool
+
config MLX5_FPGA
bool "Mellanox Technologies Innova support"
depends on MLX5_CORE
+ select MLX5_ACCEL
---help---
Build support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
@@ -48,3 +52,15 @@ config MLX5_CORE_IPOIB
default n
---help---
MLX5 IPoIB offloads & acceleration support.
+
+config MLX5_EN_IPSEC
+ bool "IPSec XFRM cryptography-offload accelaration"
+ depends on MLX5_ACCEL
+ depends on MLX5_CORE_EN
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+ Build support for IPsec cryptography-offload acceleration in the NIC.
+ Note: Support for hardware with this capability needs to be selected
+ for this option to become available.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 12556c03eec4..ca367445f864 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -4,9 +4,12 @@ subdir-ccflags-y += -I$(src)
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o dev.o
+ fs_counters.o rl.o lag.o dev.o lib/gid.o
-mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o
+mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
+
+mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
+ fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
@@ -15,4 +18,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
-mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib.o
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o
+
+mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
+ en_accel/ipsec_stats.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
new file mode 100644
index 000000000000..53e69edaedde
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx5/device.h>
+
+#include "accel/ipsec.h"
+#include "mlx5_core.h"
+#include "fpga/ipsec.h"
+
+void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_ipsec_sa *cmd)
+{
+ if (!MLX5_IPSEC_DEV(mdev))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd);
+}
+
+int mlx5_accel_ipsec_sa_cmd_wait(void *ctx)
+{
+ return mlx5_fpga_ipsec_sa_cmd_wait(ctx);
+}
+
+u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_ipsec_device_caps(mdev);
+}
+
+unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_ipsec_counters_count(mdev);
+}
+
+int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
+ unsigned int count)
+{
+ return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
+}
+
+int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_ipsec_init(mdev);
+}
+
+void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
+{
+ mlx5_fpga_ipsec_cleanup(mdev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
new file mode 100644
index 000000000000..d6e20fea9554
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_ACCEL_IPSEC_H__
+#define __MLX5_ACCEL_IPSEC_H__
+
+#ifdef CONFIG_MLX5_ACCEL
+
+#include <linux/mlx5/driver.h>
+
+enum {
+ MLX5_ACCEL_IPSEC_DEVICE = BIT(1),
+ MLX5_ACCEL_IPSEC_IPV6 = BIT(2),
+ MLX5_ACCEL_IPSEC_ESP = BIT(3),
+ MLX5_ACCEL_IPSEC_LSO = BIT(4),
+};
+
+#define MLX5_IPSEC_SADB_IP_AH BIT(7)
+#define MLX5_IPSEC_SADB_IP_ESP BIT(6)
+#define MLX5_IPSEC_SADB_SA_VALID BIT(5)
+#define MLX5_IPSEC_SADB_SPI_EN BIT(4)
+#define MLX5_IPSEC_SADB_DIR_SX BIT(3)
+#define MLX5_IPSEC_SADB_IPV6 BIT(2)
+
+enum {
+ MLX5_IPSEC_CMD_ADD_SA = 0,
+ MLX5_IPSEC_CMD_DEL_SA = 1,
+};
+
+enum mlx5_accel_ipsec_enc_mode {
+ MLX5_IPSEC_SADB_MODE_NONE = 0,
+ MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1,
+ MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3,
+};
+
+#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
+ MLX5_ACCEL_IPSEC_DEVICE)
+
+struct mlx5_accel_ipsec_sa {
+ __be32 cmd;
+ u8 key_enc[32];
+ u8 key_auth[32];
+ __be32 sip[4];
+ __be32 dip[4];
+ union {
+ struct {
+ __be32 reserved;
+ u8 salt_iv[8];
+ __be32 salt;
+ } __packed gcm;
+ struct {
+ u8 salt[16];
+ } __packed cbc;
+ };
+ __be32 spi;
+ __be32 sw_sa_handle;
+ __be16 tfclen;
+ u8 enc_mode;
+ u8 sip_masklen;
+ u8 dip_masklen;
+ u8 flags;
+ u8 reserved[2];
+} __packed;
+
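A quick consistency check on the packed layout (my arithmetic, not a constant
taken from the patch): 4 + 32 + 32 + 16 + 16 + 16 (union) + 4 + 4 + 2 +
4 * 1 + 2 = 132 bytes on the wire. If the hardware contract should be pinned
at build time, a hypothetical guard would be:

	BUILD_BUG_ON(sizeof(struct mlx5_accel_ipsec_sa) != 132);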
+/**
+ * mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command
+ * @mdev: mlx5 device
+ * @cmd: command to execute
+ * May be called from atomic context. Returns a context pointer or an
+ * ERR_PTR. The caller must eventually call mlx5_accel_ipsec_sa_cmd_wait()
+ * from non-atomic context to clean up the context pointer.
+ */
+void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_ipsec_sa *cmd);
+
+/**
+ * mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion
+ * @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec
+ * Sleeps (killable) until command execution is complete.
+ * Returns the command result, or -EINTR if killed
+ */
+int mlx5_accel_ipsec_sa_cmd_wait(void *context);
+
+u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
+
+unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
+int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
+ unsigned int count);
+
+int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
+void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
+
+#else
+
+#define MLX5_IPSEC_DEV(mdev) false
+
+static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
+{
+}
+
+#endif
+
+#endif /* __MLX5_ACCEL_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 4d5bd01f1ebb..f5a2c605749f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -307,6 +307,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_FPGA_DESTROY_QP:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -419,6 +420,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_FPGA_CREATE_QP:
+ case MLX5_CMD_OP_FPGA_MODIFY_QP:
+ case MLX5_CMD_OP_FPGA_QUERY_QP:
+ case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -585,6 +590,11 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
+ MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
+ MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
+ MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
+ MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8094e78292de..e1b7ddfecd01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -52,8 +52,10 @@
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
-#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+
+#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu))
+#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu))
#define MLX5E_MAX_NUM_TC 8
@@ -70,6 +72,8 @@
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
#define MLX5_RX_HEADROOM NET_SKB_PAD
+#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@ -213,6 +217,7 @@ struct mlx5e_cq_moder {
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
+ u16 rq_headroom;
u8 mpwqe_log_stride_sz;
u8 mpwqe_log_num_strides;
u8 log_rq_size;
@@ -323,6 +328,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_ENABLED,
+ MLX5E_SQ_STATE_IPSEC,
};
struct mlx5e_sq_wqe_info {
@@ -443,6 +449,11 @@ struct mlx5e_dma_info {
dma_addr_t addr;
};
+struct mlx5e_wqe_frag_info {
+ struct mlx5e_dma_info di;
+ u32 offset;
+};
+
struct mlx5e_umr_dma_info {
__be64 *mtt;
dma_addr_t mtt_addr;
@@ -504,7 +515,12 @@ struct mlx5e_rq {
struct mlx5_wq_ll wq;
union {
- struct mlx5e_dma_info *dma_info;
+ struct {
+ struct mlx5e_wqe_frag_info *frag_info;
+ u32 frag_sz; /* max possible skb frag_sz */
+ bool page_reuse;
+ bool xdp_xmit;
+ } wqe;
struct {
struct mlx5e_mpw_info *info;
void *mtt_no_align;
@@ -747,6 +763,7 @@ struct mlx5e_priv {
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
u32 tx_rates[MLX5E_MAX_NUM_SQS];
+ int hard_mtu;
struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
@@ -768,6 +785,9 @@ struct mlx5e_priv {
const struct mlx5e_profile *profile;
void *ppriv;
+#ifdef CONFIG_MLX5_EN_IPSEC
+ struct mlx5e_ipsec *ipsec;
+#endif
};
struct mlx5e_profile {
@@ -782,6 +802,7 @@ struct mlx5e_profile {
void (*enable)(struct mlx5e_priv *priv);
void (*disable)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
+ void (*update_carrier)(struct mlx5e_priv *priv);
int (*max_nch)(struct mlx5_core_dev *mdev);
struct {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
@@ -816,7 +837,6 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_rx_am(struct mlx5e_rq *rq);
void mlx5e_rx_am_work(struct work_struct *work);
@@ -849,8 +869,8 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event);
-int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
-int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
+int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -1021,6 +1041,31 @@ int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
+/* ethtool helpers */
+void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ struct ethtool_drvinfo *drvinfo);
+void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
+ uint32_t stringset, uint8_t *data);
+int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
+void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
+ struct ethtool_stats *stats, u64 *data);
+void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param);
+int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param);
+void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch);
+int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch);
+int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal);
+int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal);
+int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
+ struct ethtool_ts_info *info);
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+ struct ethtool_flash *flash);
+
/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
new file mode 100644
index 000000000000..bac5103efad3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <crypto/internal/geniv.h>
+#include <crypto/aead.h>
+#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include "en.h"
+#include "accel/ipsec.h"
+#include "en_accel/ipsec.h"
+#include "en_accel/ipsec_rxtx.h"
+
+struct mlx5e_ipsec_sa_entry {
+ struct hlist_node hlist; /* Item in SADB_RX hashtable */
+ unsigned int handle; /* Handle in SADB_RX */
+ struct xfrm_state *x;
+ struct mlx5e_ipsec *ipsec;
+ void *context;
+};
+
+struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
+ unsigned int handle)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct xfrm_state *ret = NULL;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
+ if (sa_entry->handle == handle) {
+ ret = sa_entry->x;
+ xfrm_state_hold(ret);
+ break;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
+ ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto out;
+
+ sa_entry->handle = ret;
+ hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
+ return ret;
+}
+
+static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
+ hash_del_rcu(&sa_entry->hlist);
+ spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
+}
+
+static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ unsigned long flags;
+
+ /* Wait for the hash_del_rcu call in sadb_rx_del to affect data path */
+ synchronize_rcu();
+ spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
+ ida_simple_remove(&ipsec->halloc, sa_entry->handle);
+ spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
+}
+
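The three sadb_rx helpers split unpublishing from reclamation in the usual RCU
pattern. A hedged sketch of the expected teardown order (the hardware step is
paraphrased; the real caller appears further down this file):

	mlx5e_ipsec_sadb_rx_del(sa_entry);	/* unpublish from the hash   */
	/* ... issue the DEL_SA command so HW stops using the handle ... */
	mlx5e_ipsec_sadb_rx_free(sa_entry);	/* synchronize_rcu(), then   */
						/* recycle the IDA handle    */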
+static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x)
+{
+ unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4;
+
+ switch (key_len) {
+ case 16:
+ return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128;
+ case 32:
+ return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128;
+ default:
+ netdev_warn(x->xso.dev, "Bad key len: %d for alg %s\n",
+ key_len, x->aead->alg_name);
+ return -1;
+ }
+}
+
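Worked numbers for the key-length switch above, following the rfc4106
convention that alg_key_len counts bits and includes a trailing 4-byte salt:
AES-GCM-128 gives (160 + 7) / 8 - 4 = 16 and AES-GCM-256 gives
(288 + 7) / 8 - 4 = 32, matching the 128 + 32 and 256 + 32 bit checks in
mlx5e_xfrm_validate_state() below.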
+static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_accel_ipsec_sa *hw_sa)
+{
+ struct xfrm_state *x = sa_entry->x;
+ struct aead_geniv_ctx *geniv_ctx;
+ unsigned int crypto_data_len;
+ struct crypto_aead *aead;
+ unsigned int key_len;
+ int ivsize;
+
+ memset(hw_sa, 0, sizeof(*hw_sa));
+
+ if (op == MLX5_IPSEC_CMD_ADD_SA) {
+ crypto_data_len = (x->aead->alg_key_len + 7) / 8;
+ key_len = crypto_data_len - 4; /* 4 bytes salt at end */
+ aead = x->data;
+ geniv_ctx = crypto_aead_ctx(aead);
+ ivsize = crypto_aead_ivsize(aead);
+
+ memcpy(&hw_sa->key_enc, x->aead->alg_key, key_len);
+ /* Duplicate 128 bit key twice according to HW layout */
+ if (key_len == 16)
+ memcpy(&hw_sa->key_enc[16], x->aead->alg_key, key_len);
+ memcpy(&hw_sa->gcm.salt_iv, geniv_ctx->salt, ivsize);
+ hw_sa->gcm.salt = *((__be32 *)(x->aead->alg_key + key_len));
+ }
+
+ hw_sa->cmd = htonl(op);
+ hw_sa->flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN;
+ if (x->props.family == AF_INET) {
+ hw_sa->sip[3] = x->props.saddr.a4;
+ hw_sa->dip[3] = x->id.daddr.a4;
+ hw_sa->sip_masklen = 32;
+ hw_sa->dip_masklen = 32;
+ } else {
+ memcpy(hw_sa->sip, x->props.saddr.a6, sizeof(hw_sa->sip));
+ memcpy(hw_sa->dip, x->id.daddr.a6, sizeof(hw_sa->dip));
+ hw_sa->sip_masklen = 128;
+ hw_sa->dip_masklen = 128;
+ hw_sa->flags |= MLX5_IPSEC_SADB_IPV6;
+ }
+ hw_sa->spi = x->id.spi;
+ hw_sa->sw_sa_handle = htonl(sa_entry->handle);
+ switch (x->id.proto) {
+ case IPPROTO_ESP:
+ hw_sa->flags |= MLX5_IPSEC_SADB_IP_ESP;
+ break;
+ case IPPROTO_AH:
+ hw_sa->flags |= MLX5_IPSEC_SADB_IP_AH;
+ break;
+ default:
+ break;
+ }
+ hw_sa->enc_mode = mlx5e_ipsec_enc_mode(x);
+ if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND))
+ hw_sa->flags |= MLX5_IPSEC_SADB_DIR_SX;
+}
+
+static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct mlx5e_priv *priv;
+
+ priv = netdev_priv(netdev);
+
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+ netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ netdev_info(netdev, "Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ netdev_info(netdev, "Cannot offload ESN xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET &&
+ x->props.family != AF_INET6) {
+ netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ netdev_info(netdev, "Cannot offload xfrm states without aead\n");
+ return -EINVAL;
+ }
+ if (x->aead->alg_icv_len != 128) {
+ netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
+ return -EINVAL;
+ }
+ if ((x->aead->alg_key_len != 128 + 32) &&
+ (x->aead->alg_key_len != 256 + 32)) {
+ netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
+ return -EINVAL;
+ }
+ if (x->props.family == AF_INET6 &&
+ !(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_IPV6)) {
+ netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int mlx5e_xfrm_add_state(struct xfrm_state *x)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
+ struct net_device *netdev = x->xso.dev;
+ struct mlx5_accel_ipsec_sa hw_sa;
+ struct mlx5e_priv *priv;
+ void *context;
+ int err;
+
+ priv = netdev_priv(netdev);
+
+ err = mlx5e_xfrm_validate_state(x);
+ if (err)
+ return err;
+
+ sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (!sa_entry) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sa_entry->x = x;
+ sa_entry->ipsec = priv->ipsec;
+
+ /* Add the SA to SADB_RX now, so that incoming packets the hardware
+ * processes before the add-SA completion arrives can still be matched
+ */
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ err = mlx5e_ipsec_sadb_rx_add(sa_entry);
+ if (err) {
+ netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
+ goto err_entry;
+ }
+ }
+
+ mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa);
+ context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
+ if (IS_ERR(context)) {
+ err = PTR_ERR(context);
+ goto err_sadb_rx;
+ }
+
+ err = mlx5_accel_ipsec_sa_cmd_wait(context);
+ if (err)
+ goto err_sadb_rx;
+
+ x->xso.offload_handle = (unsigned long)sa_entry;
+ goto out;
+
+err_sadb_rx:
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ mlx5e_ipsec_sadb_rx_del(sa_entry);
+ mlx5e_ipsec_sadb_rx_free(sa_entry);
+ }
+err_entry:
+ kfree(sa_entry);
+out:
+ return err;
+}
+
+static void mlx5e_xfrm_del_state(struct xfrm_state *x)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct mlx5_accel_ipsec_sa hw_sa;
+ void *context;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
+ WARN_ON(sa_entry->x != x);
+
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ mlx5e_ipsec_sadb_rx_del(sa_entry);
+
+ mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa);
+ context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
+ if (IS_ERR(context))
+ return;
+
+ sa_entry->context = context;
+}
+
+static void mlx5e_xfrm_free_state(struct xfrm_state *x)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ int res;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
+ WARN_ON(sa_entry->x != x);
+
+ res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context);
+ sa_entry->context = NULL;
+ if (res) {
+ /* Leftover object will leak */
+ return;
+ }
+
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ mlx5e_ipsec_sadb_rx_free(sa_entry);
+
+ kfree(sa_entry);
+}
+
+int mlx5e_ipsec_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec *ipsec = NULL;
+
+ if (!MLX5_IPSEC_DEV(priv->mdev)) {
+ netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
+ return 0;
+ }
+
+ ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
+ if (!ipsec)
+ return -ENOMEM;
+
+ hash_init(ipsec->sadb_rx);
+ spin_lock_init(&ipsec->sadb_rx_lock);
+ ida_init(&ipsec->halloc);
+ ipsec->en_priv = priv;
+ ipsec->en_priv->ipsec = ipsec;
+ netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
+ return 0;
+}
+
+void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+
+ if (!ipsec)
+ return;
+
+ ida_destroy(&ipsec->halloc);
+ kfree(ipsec);
+ priv->ipsec = NULL;
+}
+
+static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+
+ return true;
+}
+
+static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = mlx5e_xfrm_add_state,
+ .xdo_dev_state_delete = mlx5e_xfrm_del_state,
+ .xdo_dev_state_free = mlx5e_xfrm_free_state,
+ .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
+};
+
+void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct net_device *netdev = priv->netdev;
+
+ if (!priv->ipsec)
+ return;
+
+ if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_ESP) ||
+ !MLX5_CAP_ETH(mdev, swp)) {
+ mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
+ return;
+ }
+
+ mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
+ netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+ netdev->features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum)) {
+ mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
+ return;
+ }
+
+ netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
+
+ if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_LSO) ||
+ !MLX5_CAP_ETH(mdev, swp_lso)) {
+ mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
+ return;
+ }
+
+ mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
+ netdev->features |= NETIF_F_GSO_ESP;
+ netdev->hw_features |= NETIF_F_GSO_ESP;
+ netdev->hw_enc_features |= NETIF_F_GSO_ESP;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
new file mode 100644
index 000000000000..56e00baf16cc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5E_IPSEC_H__
+#define __MLX5E_IPSEC_H__
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+
+#include <linux/mlx5/device.h>
+#include <net/xfrm.h>
+#include <linux/idr.h>
+
+#define MLX5E_IPSEC_SADB_RX_BITS 10
+#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
+#define MLX5E_METADATA_ETHER_LEN 8
+
+struct mlx5e_priv;
+
+struct mlx5e_ipsec_sw_stats {
+ atomic64_t ipsec_rx_drop_sp_alloc;
+ atomic64_t ipsec_rx_drop_sadb_miss;
+ atomic64_t ipsec_rx_drop_syndrome;
+ atomic64_t ipsec_tx_drop_bundle;
+ atomic64_t ipsec_tx_drop_no_state;
+ atomic64_t ipsec_tx_drop_not_ip;
+ atomic64_t ipsec_tx_drop_trailer;
+ atomic64_t ipsec_tx_drop_metadata;
+};
+
+struct mlx5e_ipsec_stats {
+ u64 ipsec_dec_in_packets;
+ u64 ipsec_dec_out_packets;
+ u64 ipsec_dec_bypass_packets;
+ u64 ipsec_enc_in_packets;
+ u64 ipsec_enc_out_packets;
+ u64 ipsec_enc_bypass_packets;
+ u64 ipsec_dec_drop_packets;
+ u64 ipsec_dec_auth_fail_packets;
+ u64 ipsec_enc_drop_packets;
+ u64 ipsec_add_sa_success;
+ u64 ipsec_add_sa_fail;
+ u64 ipsec_del_sa_success;
+ u64 ipsec_del_sa_fail;
+ u64 ipsec_cmd_drop;
+};
+
+struct mlx5e_ipsec {
+ struct mlx5e_priv *en_priv;
+ DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
+ spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
+ struct ida halloc;
+ struct mlx5e_ipsec_sw_stats sw_stats;
+ struct mlx5e_ipsec_stats stats;
+};
+
+void mlx5e_ipsec_build_inverse_table(void);
+int mlx5e_ipsec_init(struct mlx5e_priv *priv);
+void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
+void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
+
+int mlx5e_ipsec_get_count(struct mlx5e_priv *priv);
+int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data);
+void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv);
+int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data);
+
+struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
+ unsigned int handle);
+
+#else
+
+static inline void mlx5e_ipsec_build_inverse_table(void)
+{
+}
+
+static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
+{
+}
+
+static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
+{
+}
+
+static inline int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv,
+ uint8_t *data)
+{
+ return 0;
+}
+
+static inline void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
+{
+}
+
+static inline int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
new file mode 100644
index 000000000000..4a78aefdf157
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <crypto/aead.h>
+#include <net/xfrm.h>
+#include <net/esp.h>
+
+#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/ipsec.h"
+#include "en.h"
+
+enum {
+ MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
+ MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
+};
+
+struct mlx5e_ipsec_rx_metadata {
+ unsigned char reserved;
+ __be32 sa_handle;
+} __packed;
+
+enum {
+ MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
+ MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
+};
+
+struct mlx5e_ipsec_tx_metadata {
+ __be16 mss_inv; /* 1/MSS in 16-bit fixed point, only for LSO */
+ __be16 seq; /* LSBs of the first TCP seq, only for LSO */
+ u8 esp_next_proto; /* Next protocol of ESP */
+} __packed;
+
+struct mlx5e_ipsec_metadata {
+ unsigned char syndrome;
+ union {
+ unsigned char raw[5];
+ /* from FPGA to host, on successful decrypt */
+ struct mlx5e_ipsec_rx_metadata rx;
+ /* from host to FPGA */
+ struct mlx5e_ipsec_tx_metadata tx;
+ } __packed content;
+ /* packet type ID field */
+ __be16 ethertype;
+} __packed;
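+
+/* Layout note: 1 (syndrome) + 5 (content) + 2 (ethertype) = 8 bytes, which
+ * matches MLX5E_METADATA_ETHER_LEN; a BUILD_BUG_ON() against
+ * sizeof(struct mlx5e_ipsec_metadata) would be a reasonable guard here.
+ */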
+
+#define MAX_LSO_MSS 2048
+
+/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
+static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];
+
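+/* Callers must guarantee gso_size < MAX_LSO_MSS, or this lookup reads past
+ * the table; the TX path is assumed to enforce that bound.
+ */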
+static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
+{
+ return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
+}
+
+static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
+{
+ struct mlx5e_ipsec_metadata *mdata;
+ struct ethhdr *eth;
+
+ if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
+ return ERR_PTR(-ENOMEM);
+
+ eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
+ skb->mac_header -= sizeof(*mdata);
+ mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);
+
+ memmove(skb->data, skb->data + sizeof(*mdata),
+ 2 * ETH_ALEN);
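+ /* Resulting frame: dmac, smac, 0x8CE4, syndrome, raw[5], original
+ * ethertype, payload. The original ethertype is not copied; it already
+ * sits where mdata->ethertype lands (see the RX-side comment in
+ * mlx5e_ipsec_handle_rx_skb)
+ */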
+
+ eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
+
+ memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
+ return mdata;
+}
+
+static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
+{
+ unsigned int alen = crypto_aead_authsize(x->data);
+ struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
+ struct iphdr *ipv4hdr = ip_hdr(skb);
+ unsigned int trailer_len;
+ u8 plen;
+ int ret;
+
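+ /* ESP trailer layout: padding (plen bytes), pad length (1 byte),
+ * next header (1 byte), then the ICV (alen bytes); so the pad-length
+ * byte sits at offset skb->len - alen - 2
+ */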
+ ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
+ if (unlikely(ret))
+ return ret;
+
+ trailer_len = alen + plen + 2;
+
+ ret = pskb_trim(skb, skb->len - trailer_len);
+ if (unlikely(ret))
+ return ret;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
+ ip_send_check(ipv4hdr);
+ } else {
+ ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
+ trailer_len);
+ }
+ return 0;
+}
+
+static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg, u8 mode,
+ struct xfrm_offload *xo)
+{
+ u8 proto;
+
+ /* Tunnel Mode:
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP ESP IP L4
+ *
+ * Transport Mode:
+ * SWP: OutL3 InL4
+ * InL3
+ * Pkt: MAC IP ESP L4
+ *
+ * Offsets are in 2-byte words, counting from start of frame
+ */
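+ /* Illustrative arithmetic: if the IP header starts 22 bytes into the
+ * frame (14-byte Ethernet header plus the 8-byte metadata pushed by
+ * mlx5e_ipsec_add_metadata), swp_outer_l3_offset is 22 / 2 = 11
+ */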
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+ if (mode == XFRM_MODE_TUNNEL) {
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ if (xo->proto == IPPROTO_IPV6) {
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ proto = inner_ipv6_hdr(skb)->nexthdr;
+ } else {
+ proto = inner_ip_hdr(skb)->protocol;
+ }
+ } else {
+ eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ proto = xo->proto;
+ }
+ switch (proto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ /* Fall through */
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ }
+}
+
+static void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_offload *xo)
+{
+ int iv_offset;
+ __be64 seqno;
+
+ /* Place the SN in the IV field */
+ seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
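+ /* sizeof(struct ip_esp_hdr) is 8 (SPI + sequence number), so the IV
+ * slot directly follows the ESP header
+ */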
+ iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
+ skb_store_bits(skb, iv_offset, &seqno, 8);
+}
+
+static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
+ struct mlx5e_ipsec_metadata *mdata,
+ struct xfrm_offload *xo)
+{
+ struct ip_esp_hdr *esph;
+ struct tcphdr *tcph;
+
+ if (skb_is_gso(skb)) {
+ /* Add LSO metadata indication */
+ esph = ip_esp_hdr(skb);
+ tcph = inner_tcp_hdr(skb);
+ netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
+ skb->network_header,
+ skb->transport_header,
+ skb->inner_network_header,
+ skb->inner_transport_header);
+ netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
+ skb->len, skb_shinfo(skb)->gso_size,
+ ntohs(tcph->source), ntohs(tcph->dest),
+ ntohl(tcph->seq), ntohl(esph->seq_no));
+ mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
+ mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
+ mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
+ } else {
+ mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
+ }
+ mdata->content.tx.esp_next_proto = xo->proto;
+
+ netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
+ mdata->syndrome, mdata->content.tx.esp_next_proto,
+ ntohs(mdata->content.tx.mss_inv),
+ ntohs(mdata->content.tx.seq));
+}
+
+struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_tx_wqe *wqe,
+ struct sk_buff *skb)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct mlx5e_ipsec_metadata *mdata;
+ struct xfrm_state *x;
+
+ if (!xo)
+ return skb;
+
+ if (unlikely(skb->sp->len != 1)) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
+ goto drop;
+ }
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x)) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
+ goto drop;
+ }
+
+ if (unlikely(!x->xso.offload_handle ||
+ (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)))) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
+ goto drop;
+ }
+
+ if (!skb_is_gso(skb))
+ if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
+ goto drop;
+ }
+ mdata = mlx5e_ipsec_add_metadata(skb);
+ if (unlikely(IS_ERR(mdata))) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
+ goto drop;
+ }
+ mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
+ mlx5e_ipsec_set_iv(skb, xo);
+ mlx5e_ipsec_set_metadata(skb, mdata, xo);
+
+ return skb;
+
+drop:
+ kfree_skb(skb);
+ return NULL;
+}
+
+static inline struct xfrm_state *
+mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5e_ipsec_metadata *mdata)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct xfrm_offload *xo;
+ struct xfrm_state *xs;
+ u32 sa_handle;
+
+ skb->sp = secpath_dup(skb->sp);
+ if (unlikely(!skb->sp)) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
+ return NULL;
+ }
+
+ sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
+ xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
+ if (unlikely(!xs)) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+ return NULL;
+ }
+
+ skb->sp->xvec[skb->sp->len++] = xs;
+ skb->sp->olen++;
+
+ xo = xfrm_offload(skb);
+ xo->flags = CRYPTO_DONE;
+ switch (mdata->syndrome) {
+ case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
+ xo->status = CRYPTO_SUCCESS;
+ break;
+ case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
+ xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
+ break;
+ default:
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
+ return NULL;
+ }
+ return xs;
+}
+
+struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb)
+{
+ struct mlx5e_ipsec_metadata *mdata;
+ struct ethhdr *old_eth;
+ struct ethhdr *new_eth;
+ struct xfrm_state *xs;
+ __be16 *ethtype;
+
+ /* Detect inline metadata */
+ if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
+ return skb;
+ ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
+ if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+ return skb;
+
+ /* Use the metadata */
+ mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
+ xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
+ if (unlikely(!xs)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ /* Remove the metadata from the buffer */
+ old_eth = (struct ethhdr *)skb->data;
+ new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
+ memmove(new_eth, old_eth, 2 * ETH_ALEN);
+ /* Ethertype is already in its new place */
+ skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+
+ return skb;
+}
+
+bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct xfrm_state *x;
+
+ if (skb->sp && skb->sp->len) {
+ x = skb->sp->xvec[0];
+ if (x && x->xso.offload_handle)
+ return true;
+ }
+ return false;
+}
+
+void mlx5e_ipsec_build_inverse_table(void)
+{
+ u16 mss_inv;
+ u32 mss;
+
+ /* Calculate the 1/x inverse table for use in the GSO data path.
+ * Using this table, we provide the IPSec accelerator with the value of
+ * 1/gso_size so that it can infer the position of each segment inside
+ * the GSO packet, increment the ESP sequence number, and generate the IV.
+ * The HW needs this value in Q0.16 fixed-point number format.
+ */
+ mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
+ for (mss = 2; mss < MAX_LSO_MSS; mss++) {
+ mss_inv = ((1ULL << 32) / mss) >> 16;
+ mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
new file mode 100644
index 000000000000..e37ae2598dbb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5E_IPSEC_RXTX_H__
+#define __MLX5E_IPSEC_RXTX_H__
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+
+#include <linux/skbuff.h>
+#include "en.h"
+
+struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb);
+void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+
+bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features);
+struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_tx_wqe *wqe,
+ struct sk_buff *skb);
+
+#endif /* CONFIG_MLX5_EN_IPSEC */
+
+#endif /* __MLX5E_IPSEC_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
new file mode 100644
index 000000000000..6fea59223dc4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/ethtool.h>
+#include <net/sock.h>
+
+#include "en.h"
+#include "accel/ipsec.h"
+#include "fpga/sdk.h"
+#include "en_accel/ipsec.h"
+
+static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) },
+};
+
+static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) },
+};
+
+#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
+ atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
+
+#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
+#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
+
+#define NUM_IPSEC_COUNTERS (NUM_IPSEC_HW_COUNTERS + NUM_IPSEC_SW_COUNTERS)
+
+int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
+{
+ if (!priv->ipsec)
+ return 0;
+
+ return NUM_IPSEC_COUNTERS;
+}
+
+int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ unsigned int i, idx = 0;
+
+ if (!priv->ipsec)
+ return 0;
+
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_hw_stats_desc[i].format);
+
+ for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_sw_stats_desc[i].format);
+
+ return NUM_IPSEC_COUNTERS;
+}
+
+void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
+{
+ int ret;
+
+ if (!priv->ipsec)
+ return;
+
+ ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
+ NUM_IPSEC_HW_COUNTERS);
+ if (ret)
+ memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
+}
+
+int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+ int i, idx = 0;
+
+ if (!priv->ipsec)
+ return 0;
+
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
+ mlx5e_ipsec_hw_stats_desc, i);
+
+ for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
+ mlx5e_ipsec_sw_stats_desc, i);
+
+ return NUM_IPSEC_COUNTERS;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index e29494464cae..66f432385dbb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -86,9 +86,8 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
}
-int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
int err;
@@ -130,10 +129,10 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
/* Disable CQE compression */
- netdev_warn(dev, "Disabling cqe compression");
+ netdev_warn(priv->netdev, "Disabling cqe compression\n");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
if (err) {
- netdev_err(dev, "Failed disabling cqe compression err=%d\n", err);
+ netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
mutex_unlock(&priv->state_lock);
return err;
}
@@ -151,9 +150,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
-int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr)
+int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 216752070391..917fade5f5d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -31,11 +31,11 @@
*/
#include "en.h"
+#include "en_accel/ipsec.h"
-static void mlx5e_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
+void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ struct ethtool_drvinfo *drvinfo)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
@@ -49,6 +49,14 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
+static void mlx5e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_drvinfo(priv, drvinfo);
+}
+
struct ptys2ethtool_config {
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
@@ -135,6 +143,9 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
u8 pfc_en_rx;
int err;
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
return err ? 0 : pfc_en_tx | pfc_en_rx;
@@ -147,6 +158,9 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
u32 tx_pause;
int err;
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return false;
+
err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
return err ? false : rx_pause | tx_pause;
@@ -160,9 +174,8 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
NUM_PPORT_PER_PRIO_PFC_COUNTERS)
-static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
@@ -174,7 +187,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
MLX5E_NUM_SQ_STATS(priv) +
MLX5E_NUM_PFC_COUNTERS(priv) +
ARRAY_SIZE(mlx5e_pme_status_desc) +
- ARRAY_SIZE(mlx5e_pme_error_desc);
+ ARRAY_SIZE(mlx5e_pme_error_desc) +
+ mlx5e_ipsec_get_count(priv);
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(mlx5e_priv_flags);
@@ -186,6 +200,13 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
}
}
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_sset_count(priv, sset);
+}
+
static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
{
int i, j, tc, prio, idx = 0;
@@ -256,6 +277,9 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
+ /* IPSec counters */
+ idx += mlx5e_ipsec_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return;
@@ -273,10 +297,9 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
priv->channel_tc2txq[i][tc]);
}
-static void mlx5e_get_strings(struct net_device *dev,
- uint32_t stringset, uint8_t *data)
+void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
+ uint32_t stringset, uint8_t *data)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int i;
switch (stringset) {
@@ -297,10 +320,17 @@ static void mlx5e_get_strings(struct net_device *dev,
}
}
-static void mlx5e_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static void mlx5e_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_strings(priv, stringset, data);
+}
+
+void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
+ struct ethtool_stats *stats, u64 *data)
+{
struct mlx5e_channels *channels;
struct mlx5_priv *mlx5_priv;
int i, j, tc, prio, idx = 0;
@@ -378,6 +408,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
mlx5e_pme_error_desc, i);
+ /* IPSec counters */
+ idx += mlx5e_ipsec_get_stats(priv, data + idx);
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return;
@@ -395,6 +428,15 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
sq_stats_desc, j);
}
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
+}
+
static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
int num_wqe)
{
@@ -439,10 +481,9 @@ static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
return 1 << (order_base_2(num_wqes));
}
-static void mlx5e_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int rq_wq_type = priv->channels.params.rq_wq_type;
param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
@@ -453,10 +494,17 @@ static void mlx5e_get_ringparam(struct net_device *dev,
param->tx_pending = 1 << priv->channels.params.log_sq_size;
}
-static int mlx5e_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+static void mlx5e_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_ringparam(priv, param);
+}
+
+int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param)
+{
int rq_wq_type = priv->channels.params.rq_wq_type;
struct mlx5e_channels new_channels = {};
u32 rx_pending_wqes;
@@ -468,12 +516,12 @@ static int mlx5e_set_ringparam(struct net_device *dev,
int err = 0;
if (param->rx_jumbo_pending) {
- netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+ netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n",
__func__);
return -EINVAL;
}
if (param->rx_mini_pending) {
- netdev_info(dev, "%s: rx_mini_pending not supported\n",
+ netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n",
__func__);
return -EINVAL;
}
@@ -486,13 +534,13 @@ static int mlx5e_set_ringparam(struct net_device *dev,
param->rx_pending);
if (param->rx_pending < min_rq_size) {
- netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+ netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
min_rq_size);
return -EINVAL;
}
if (param->rx_pending > max_rq_size) {
- netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+ netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
max_rq_size);
return -EINVAL;
@@ -501,19 +549,19 @@ static int mlx5e_set_ringparam(struct net_device *dev,
num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
!MLX5E_VALID_NUM_MTTS(num_mtts)) {
- netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+ netdev_info(priv->netdev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
__func__, param->rx_pending);
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
- netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+ netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%d)\n",
__func__, param->tx_pending,
1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
- netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+ netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n",
__func__, param->tx_pending,
1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
return -EINVAL;
@@ -549,26 +597,39 @@ unlock:
return err;
}
-static void mlx5e_get_channels(struct net_device *dev,
- struct ethtool_channels *ch)
+static int mlx5e_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ return mlx5e_ethtool_set_ringparam(priv, param);
+}
+
+void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch)
+{
ch->max_combined = priv->profile->max_nch(priv->mdev);
ch->combined_count = priv->channels.params.num_channels;
}
-static int mlx5e_set_channels(struct net_device *dev,
- struct ethtool_channels *ch)
+static void mlx5e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_channels(priv, ch);
+}
+
+int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch)
+{
unsigned int count = ch->combined_count;
struct mlx5e_channels new_channels = {};
bool arfs_enabled;
int err = 0;
if (!count) {
- netdev_info(dev, "%s: combined_count=0 not supported\n",
+ netdev_info(priv->netdev, "%s: combined_count=0 not supported\n",
__func__);
return -EINVAL;
}
@@ -593,7 +654,7 @@ static int mlx5e_set_channels(struct net_device *dev,
if (err)
goto out;
- arfs_enabled = dev->features & NETIF_F_NTUPLE;
+ arfs_enabled = priv->netdev->features & NETIF_F_NTUPLE;
if (arfs_enabled)
mlx5e_arfs_disable(priv);
@@ -603,7 +664,7 @@ static int mlx5e_set_channels(struct net_device *dev,
if (arfs_enabled) {
err = mlx5e_arfs_enable(priv);
if (err)
- netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
+ netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n",
__func__, err);
}
@@ -613,11 +674,17 @@ out:
return err;
}
-static int mlx5e_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *coal)
+static int mlx5e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ return mlx5e_ethtool_set_channels(priv, ch);
+}
+
+int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal)
+{
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
@@ -630,6 +697,14 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
return 0;
}
+static int mlx5e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_coalesce(priv, coal);
+}
+
static void
mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
@@ -653,10 +728,9 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc
}
}
-static int mlx5e_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *coal)
+int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
int err = 0;
@@ -699,6 +773,14 @@ out:
return err;
}
+static int mlx5e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_coalesce(priv, coal);
+}
+
static void ptys2ethtool_supported_link(unsigned long *supported_modes,
u32 eth_proto_cap)
{
@@ -1298,13 +1380,12 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
return err;
}
-static int mlx5e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
+ struct ethtool_ts_info *info)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int ret;
- ret = ethtool_op_get_ts_info(dev, info);
+ ret = ethtool_op_get_ts_info(priv->netdev, info);
if (ret)
return ret;
@@ -1318,15 +1399,23 @@ static int mlx5e_get_ts_info(struct net_device *dev,
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) |
- (BIT(1) << HWTSTAMP_TX_ON);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
- info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) |
- (BIT(1) << HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
+static int mlx5e_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_ts_info(priv, info);
+}
+
static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
{
__u32 ret = 0;
@@ -1714,6 +1803,40 @@ static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+ struct ethtool_flash *flash)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->netdev;
+ const struct firmware *fw;
+ int err;
+
+ if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+ return -EOPNOTSUPP;
+
+ err = request_firmware_direct(&fw, flash->data, &dev->dev);
+ if (err)
+ return err;
+
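+ /* Firmware flashing can take a long time; hold the netdev so it cannot
+ * be freed, and drop RTNL so unrelated configuration is not blocked in
+ * the meantime
+ */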
+ dev_hold(dev);
+ rtnl_unlock();
+
+ err = mlx5_firmware_flash(mdev, fw);
+ release_firmware(fw);
+
+ rtnl_lock();
+ dev_put(dev);
+ return err;
+}
+
+static int mlx5e_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_flash_device(priv, flash);
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1734,6 +1857,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+ .flash_device = mlx5e_flash_device,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pauseparam = mlx5e_get_pauseparam,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 06eb7a8b487c..a09b11f467a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,9 @@
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
+#include "en_accel/ipsec.h"
+#include "en_accel/ipsec_rxtx.h"
+#include "accel/ipsec.h"
#include "vxlan.h"
struct mlx5e_rq_param {
@@ -96,9 +99,12 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ params->rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ params->rq_headroom += NET_IP_ALIGN;
/* Extra room needed for build_skb */
- params->lro_wqe_sz -= MLX5_RX_HEADROOM +
+ params->lro_wqe_sz -= params->rq_headroom +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -112,7 +118,7 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
- !params->xdp_prog ?
+ !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_LINKED_LIST;
mlx5e_set_rq_type_params(mdev, params, rq_type);
@@ -143,7 +149,8 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_update_carrier(priv);
+ if (priv->profile->update_carrier)
+ priv->profile->update_carrier(priv);
mutex_unlock(&priv->state_lock);
}
@@ -196,6 +203,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+ s->rx_page_reuse += rq_stats->page_reuse;
s->rx_cache_reuse += rq_stats->cache_reuse;
s->rx_cache_full += rq_stats->cache_full;
s->rx_cache_empty += rq_stats->cache_empty;
@@ -323,8 +331,10 @@ static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
{
- if (full)
+ if (full) {
mlx5e_update_pcie_counters(priv);
+ mlx5e_ipsec_update_stats(priv);
+ }
mlx5e_update_pport_counters(priv, full);
mlx5e_update_vport_counters(priv);
mlx5e_update_q_counter(priv);
@@ -546,7 +556,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 byte_count;
- u32 frag_sz;
int npages;
int wq_sz;
int err;
@@ -578,13 +587,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- if (rq->xdp_prog) {
- rq->buff.map_dir = DMA_BIDIRECTIONAL;
- rq->rx_headroom = XDP_PACKET_HEADROOM;
- } else {
- rq->buff.map_dir = DMA_FROM_DEVICE;
- rq->rx_headroom = MLX5_RX_HEADROOM;
- }
+ rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ rq->rx_headroom = params->rq_headroom;
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -593,6 +597,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (MLX5_IPSEC_DEV(mdev)) {
+ err = -EINVAL;
+ netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
+ goto err_rq_wq_destroy;
+ }
+#endif
if (!rq->handle_rx_cqe) {
err = -EINVAL;
netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
@@ -615,18 +626,24 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_destroy_umr_mkey;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
- GFP_KERNEL, cpu_to_node(c->cpu));
- if (!rq->dma_info) {
+ rq->wqe.frag_info =
+ kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->wqe.frag_info) {
err = -ENOMEM;
goto err_rq_wq_destroy;
}
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
- rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (c->priv->ipsec)
+ rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
+ else
+#endif
+ rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
if (!rq->handle_rx_cqe) {
- kfree(rq->dma_info);
+ kfree(rq->wqe.frag_info);
err = -EINVAL;
netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
goto err_rq_wq_destroy;
@@ -634,16 +651,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->buff.wqe_sz = params->lro_en ?
params->lro_wqe_sz :
- MLX5E_SW2HW_MTU(c->netdev->mtu);
+ MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (MLX5_IPSEC_DEV(mdev))
+ rq->buff.wqe_sz += MLX5E_METADATA_ETHER_LEN;
+#endif
+ rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
byte_count = rq->buff.wqe_sz;
/* calc the required page order */
- frag_sz = rq->rx_headroom +
- byte_count /* packet data */ +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- frag_sz = SKB_DATA_ALIGN(frag_sz);
-
- npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
+ rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count);
+ npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
rq->buff.page_order = order_base_2(npages);
byte_count |= MLX5_HW_START_PADDING;
@@ -688,7 +706,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- kfree(rq->dma_info);
+ kfree(rq->wqe.frag_info);
}
for (i = rq->page_cache.head; i != rq->page_cache.tail;
@@ -870,6 +888,16 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
+
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
+ /* Clean outstanding pages on handled WQEs that decided to do page-reuse,
+ * but have not yet been re-posted.
+ */
+ int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+
+ for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
+ rq->dealloc_wqe(rq, wqe_ix);
+ }
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
@@ -1088,6 +1116,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->max_inline = params->tx_max_inline;
sq->min_inline_mode = params->tx_min_inline_mode;
+ if (MLX5_IPSEC_DEV(c->priv->mdev))
+ set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
@@ -1907,6 +1937,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -2467,7 +2498,7 @@ free_in:
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
+ u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
int err;
err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
@@ -2489,7 +2520,7 @@ static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
if (err || !hw_mtu) /* fallback to port oper mtu */
mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
- *mtu = MLX5E_HW2SW_MTU(hw_mtu);
+ *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
}
static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
@@ -2598,9 +2629,10 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
{
struct net_device *netdev = priv->netdev;
int new_num_txqs;
-
+ int carrier_ok;
new_num_txqs = new_chs->num * new_chs->params.num_tc;
+ carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
if (new_num_txqs < netdev->real_num_tx_queues)
@@ -2618,7 +2650,9 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
mlx5e_refresh_tirs(priv, false);
mlx5e_activate_priv_channels(priv);
- mlx5e_update_carrier(priv);
+ /* restore the carrier state if it was previously on */
+ if (carrier_ok)
+ netif_carrier_on(netdev);
}
int mlx5e_open_locked(struct net_device *netdev)
@@ -2634,7 +2668,8 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_refresh_tirs(priv, false);
mlx5e_activate_priv_channels(priv);
- mlx5e_update_carrier(priv);
+ if (priv->profile->update_carrier)
+ priv->profile->update_carrier(priv);
mlx5e_timestamp_init(priv);
if (priv->profile->update_stats)
@@ -3312,11 +3347,13 @@ out:
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
switch (cmd) {
case SIOCSHWTSTAMP:
- return mlx5e_hwstamp_set(dev, ifr);
+ return mlx5e_hwstamp_set(priv, ifr);
case SIOCGHWTSTAMP:
- return mlx5e_hwstamp_get(dev, ifr);
+ return mlx5e_hwstamp_get(priv, ifr);
default:
return -EOPNOTSUPP;
}
@@ -3495,6 +3532,11 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (mlx5e_ipsec_feature_check(skb, netdev, features))
+ return features;
+#endif
+
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
(features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
@@ -3542,6 +3584,12 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
goto unlock;
}
+ if ((netdev->features & NETIF_F_HW_ESP) && prog) {
+ netdev_warn(netdev, "can't set XDP with IPSec offload\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
/* no need for full reset when exchanging programs */
reset = (!priv->channels.params.xdp_prog || !prog);
@@ -3871,6 +3919,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mlx5e_set_rq_params(mdev, params);
/* HW LRO */
+
/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
@@ -3911,6 +3960,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->hard_mtu = MLX5E_ETH_HARD_MTU;
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
@@ -4031,6 +4081,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (MLX5_CAP_GEN(mdev, vport_group_manager))
netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
+
+ mlx5e_ipsec_build_netdev(priv);
}
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
@@ -4059,14 +4111,19 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
+ err = mlx5e_ipsec_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_vxlan_init(priv);
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
+ mlx5e_ipsec_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
if (priv->channels.params.xdp_prog)
@@ -4156,7 +4213,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add(mdev, netdev);
@@ -4215,6 +4272,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.disable = mlx5e_nic_disable,
.update_stats = mlx5e_update_ndo_stats,
.max_nch = mlx5e_get_max_num_channels,
+ .update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
@@ -4255,7 +4313,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return netdev;
err_cleanup_nic:
- profile->cleanup(priv);
+ if (profile->cleanup)
+ profile->cleanup(priv);
free_netdev(netdev);
return NULL;
@@ -4456,6 +4515,7 @@ static struct mlx5_interface mlx5e_interface = {
void mlx5e_init(void)
{
+ mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
mlx5_register_interface(&mlx5e_interface);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 01798e1ab667..45e60be9c277 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -796,6 +796,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
params->num_tc = 1;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+ mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
static void mlx5e_build_rep_netdev(struct net_device *netdev)
@@ -833,6 +835,9 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
priv->channels.params.num_channels = profile->max_nch(mdev);
+
+ priv->hard_mtu = MLX5E_ETH_HARD_MTU;
+
mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
}
@@ -916,6 +921,7 @@ static struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_tx = mlx5e_cleanup_nic_tx,
.update_stats = mlx5e_rep_update_stats,
.max_nch = mlx5e_get_rep_max_num_channels,
+ .update_carrier = NULL,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
.max_tc = 1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 66b5fec15313..325b2c8c1c6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -40,7 +40,8 @@
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
-#include "ipoib.h"
+#include "ipoib/ipoib.h"
+#include "en_accel/ipsec_rxtx.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
@@ -160,6 +161,11 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
+static inline bool mlx5e_page_is_reserved(struct page *page)
+{
+ return page_is_pfmemalloc(page) || page_to_nid(page) != numa_node_id();
+}
+
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
@@ -238,22 +244,54 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
put_page(dma_info->page);
}
+static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq,
+ struct mlx5e_wqe_frag_info *wi)
+{
+ return rq->wqe.page_reuse && wi->di.page &&
+ (wi->offset + rq->wqe.frag_sz <= RQ_PAGE_SIZE(rq)) &&
+ !mlx5e_page_is_reserved(wi->di.page);
+}
+
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
- struct mlx5e_dma_info *di = &rq->dma_info[ix];
+ struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
- if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
- return -ENOMEM;
+ /* check if page exists, hence can be reused */
+ if (!wi->di.page) {
+ if (unlikely(mlx5e_page_alloc_mapped(rq, &wi->di)))
+ return -ENOMEM;
+ wi->offset = 0;
+ }
- wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom);
+ wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset +
+ rq->rx_headroom);
return 0;
}
+static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_wqe_frag_info *wi)
+{
+ mlx5e_page_release(rq, &wi->di, true);
+ wi->di.page = NULL;
+}
+
+static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq,
+ struct mlx5e_wqe_frag_info *wi)
+{
+ if (mlx5e_page_reuse(rq, wi)) {
+ rq->stats.page_reuse++;
+ return;
+ }
+
+ mlx5e_free_rx_wqe(rq, wi);
+}
+
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_dma_info *di = &rq->dma_info[ix];
+ struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
- mlx5e_page_release(rq, di, true);
+ if (wi->di.page)
+ mlx5e_free_rx_wqe(rq, wi);
}
static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
@@ -648,9 +686,8 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
prefetchw(wqe);
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
- MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
+ MLX5E_SW2HW_MTU(rq->channel->priv, rq->netdev->mtu) < dma_len)) {
rq->stats.xdp_drop++;
- mlx5e_page_release(rq, di, true);
return false;
}
@@ -661,7 +698,6 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
sq->db.doorbell = false;
}
rq->stats.xdp_tx_full++;
- mlx5e_page_release(rq, di, true);
return false;
}
@@ -686,10 +722,15 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+ /* move page to reference to sq responsibility,
+ * and mark so it's not put back in page-cache.
+ */
+ rq->wqe.xdp_xmit = true;
sq->db.di[pi] = *di;
sq->pc++;
sq->db.doorbell = true;
+
rq->stats.xdp_tx++;
return true;
}
@@ -726,35 +767,34 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
trace_xdp_exception(rq->netdev, prog, act);
case XDP_DROP:
rq->stats.xdp_drop++;
- mlx5e_page_release(rq, di, true);
return true;
}
}
static inline
struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- u16 wqe_counter, u32 cqe_bcnt)
+ struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
- struct mlx5e_dma_info *di;
+ struct mlx5e_dma_info *di = &wi->di;
struct sk_buff *skb;
void *va, *data;
u16 rx_headroom = rq->rx_headroom;
bool consumed;
+ u32 frag_size;
- di = &rq->dma_info[wqe_counter];
- va = page_address(di->page);
+ va = page_address(di->page) + wi->offset;
data = va + rx_headroom;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
dma_sync_single_range_for_cpu(rq->pdev,
- di->addr,
- rx_headroom,
- rq->buff.wqe_sz,
+ di->addr + wi->offset,
+ 0, frag_size,
DMA_FROM_DEVICE);
prefetch(data);
+ wi->offset += frag_size;
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
rq->stats.wqe_err++;
- mlx5e_page_release(rq, di, true);
return NULL;
}
@@ -764,16 +804,14 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
if (consumed)
return NULL; /* page/packet was consumed by XDP */
- skb = build_skb(va, RQ_PAGE_SIZE(rq));
+ skb = build_skb(va, frag_size);
if (unlikely(!skb)) {
rq->stats.buff_alloc_err++;
- mlx5e_page_release(rq, di, true);
return NULL;
}
- /* queue up for recycling ..*/
+ /* queue up for recycling/reuse */
page_ref_inc(di->page);
- mlx5e_page_release(rq, di, true);
skb_reserve(skb, rx_headroom);
skb_put(skb, cqe_bcnt);
@@ -783,6 +821,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
+ struct mlx5e_wqe_frag_info *wi;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_counter_be;
struct sk_buff *skb;
@@ -792,15 +831,27 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ wi = &rq->wqe.frag_info[wqe_counter];
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
- if (!skb)
+ skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ if (!skb) {
+ /* probably for XDP */
+ if (rq->wqe.xdp_xmit) {
+ wi->di.page = NULL;
+ rq->wqe.xdp_xmit = false;
+ /* do not return page to cache, it will be returned on XDP_TX completion */
+ goto wq_ll_pop;
+ }
+ /* probably an XDP_DROP, save the page-reuse checks */
+ mlx5e_free_rx_wqe(rq, wi);
goto wq_ll_pop;
+ }
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
+ mlx5e_free_rx_wqe_reuse(rq, wi);
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
@@ -812,6 +863,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5e_wqe_frag_info *wi;
struct mlx5e_rx_wqe *wqe;
struct sk_buff *skb;
__be16 wqe_counter_be;
@@ -821,11 +873,21 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ wi = &rq->wqe.frag_info[wqe_counter];
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
- if (!skb)
+ skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ if (!skb) {
+ if (rq->wqe.xdp_xmit) {
+ wi->di.page = NULL;
+ rq->wqe.xdp_xmit = false;
+ /* do not return page to cache, it will be returned on XDP_TX completion */
+ goto wq_ll_pop;
+ }
+ /* probably an XDP_DROP, save the page-reuse checks */
+ mlx5e_free_rx_wqe(rq, wi);
goto wq_ll_pop;
+ }
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
@@ -834,6 +896,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
+ mlx5e_free_rx_wqe_reuse(rq, wi);
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
@@ -934,7 +997,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
for (; work_done < budget; work_done++) {
- struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
break;
@@ -988,7 +1051,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
u16 wqe_counter;
bool last_wqe;
- cqe = mlx5e_get_cqe(cq);
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
break;
@@ -1038,11 +1101,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
#ifdef CONFIG_MLX5_CORE_IPOIB
#define MLX5_IB_GRH_DGID_OFFSET 24
-#define MLX5_IB_GRH_BYTES 40
-#define MLX5_IPOIB_ENCAP_LEN 4
#define MLX5_GID_SIZE 16
-#define MLX5_IPOIB_PSEUDO_LEN 20
-#define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
@@ -1050,6 +1109,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
+ struct mlx5e_tstamp *tstamp = rq->tstamp;
char *pseudo_header;
u8 *dgid;
u8 g;
@@ -1074,6 +1134,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
+ mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
+
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -1094,6 +1157,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
+ struct mlx5e_wqe_frag_info *wi;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_counter_be;
struct sk_buff *skb;
@@ -1103,18 +1167,60 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ wi = &rq->wqe.frag_info[wqe_counter];
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb)
- goto wq_ll_pop;
+ goto wq_free_wqe;
mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
-wq_ll_pop:
+wq_free_wqe:
+ mlx5e_free_rx_wqe_reuse(rq, wi);
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
}
#endif /* CONFIG_MLX5_CORE_IPOIB */
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+
+void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_wqe_frag_info *wi;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_counter_be;
+ struct sk_buff *skb;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ wi = &rq->wqe.frag_info[wqe_counter];
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ if (unlikely(!skb)) {
+ /* a DROP, save the page-reuse checks */
+ mlx5e_free_rx_wqe(rq, wi);
+ goto wq_ll_pop;
+ }
+ skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb);
+ if (unlikely(!skb)) {
+ mlx5e_free_rx_wqe(rq, wi);
+ goto wq_ll_pop;
+ }
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+
+ mlx5e_free_rx_wqe_reuse(rq, wi);
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+#endif /* CONFIG_MLX5_EN_IPSEC */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index fda247587ff6..e65517eafc58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -79,6 +79,7 @@ struct mlx5e_sw_stats {
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
+ u64 rx_page_reuse;
u64 rx_cache_reuse;
u64 rx_cache_full;
u64 rx_cache_empty;
@@ -117,6 +118,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
@@ -319,6 +321,7 @@ struct mlx5e_rq_stats {
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
+ u64 page_reuse;
u64 cache_reuse;
u64 cache_full;
u64 cache_empty;
@@ -341,6 +344,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f5afacfbe914..3c536f560dd2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -888,6 +888,34 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*min_inline = MLX5_INLINE_MODE_IP;
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ f->key);
+ struct flow_dissector_key_ip *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ f->mask);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+
+ if (mask->ttl &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.outer_ipv4_ttl))
+ return -EOPNOTSUPP;
+
+ if (mask->tos || mask->ttl)
+ *min_inline = MLX5_INLINE_MODE_IP;
+ }
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_dissector_key_ports *key =
skb_flow_dissector_target(f->dissector,
@@ -931,29 +959,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*min_inline = MLX5_INLINE_MODE_TCP_UDP;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_dissector_key_ip *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IP,
- f->key);
- struct flow_dissector_key_ip *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IP,
- f->mask);
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
-
- if (mask->tos)
- *min_inline = MLX5_INLINE_MODE_IP;
-
- if (mask->ttl) /* currently not supported */
- return -EOPNOTSUPP;
- }
-
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
struct flow_dissector_key_tcp *key =
skb_flow_dissector_target(f->dissector,
@@ -1053,33 +1058,37 @@ struct mlx5_fields {
u32 offset;
};
+#define OFFLOAD(fw_field, size, field, off) \
+ {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
+
static struct mlx5_fields fields[] = {
- {MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
- {MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_dest[4])},
- {MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
- {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])},
- {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)},
-
- {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
- {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
- {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)},
- {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},
-
- {MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
- {MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
- {MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
- {MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
- {MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
- {MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
- {MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
- {MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},
-
- {MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
- {MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
- {MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},
-
- {MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
- {MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
+ OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
+ OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
+ OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
+ OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
+ OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
+ OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
+
+ OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
+ OFFLOAD(SIPV4, 4, ip4.saddr, 0),
+ OFFLOAD(DIPV4, 4, ip4.daddr, 0),
+
+ OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
+ OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0),
+ OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0),
+ OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0),
+ OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
+ OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0),
+ OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0),
+ OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0),
+ OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
+
+ OFFLOAD(TCP_SPORT, 2, tcp.source, 0),
+ OFFLOAD(TCP_DPORT, 2, tcp.dest, 0),
+ OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
+
+ OFFLOAD(UDP_SPORT, 2, udp.source, 0),
+ OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
};
/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ef3e1918d8cc..aaa0f4ebba9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -33,7 +33,8 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"
-#include "ipoib.h"
+#include "ipoib/ipoib.h"
+#include "en_accel/ipsec_rxtx.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
@@ -299,12 +300,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
}
-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
-
- u16 pi = sq->pc & wq->sz_m1;
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
@@ -319,8 +317,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
u16 ds_cnt;
u16 ihs;
- memset(wqe, 0, sizeof(*wqe));
-
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
if (skb_is_gso(skb)) {
@@ -375,8 +371,21 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ memset(wqe, 0, sizeof(*wqe));
- return mlx5e_sq_xmit(sq, skb);
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
+ skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
+ if (unlikely(!skb))
+ return NETDEV_TX_OK;
+ }
+#endif
+
+ return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
@@ -409,7 +418,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
u16 wqe_counter;
bool last_wqe;
- cqe = mlx5e_get_cqe(cq);
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
break;
@@ -557,11 +566,16 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (skb_is_gso(skb)) {
opcode = MLX5_OPCODE_LSO;
ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
+ sq->stats.packets += skb_shinfo(skb)->gso_segs;
} else {
ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ sq->stats.packets++;
}
+ sq->stats.bytes += num_bytes;
+ sq->stats.xmit_more += skb->xmit_more;
+
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
if (ihs) {
memcpy(eseg->inline_hdr.start, skb_data, ihs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 5ca6714e3e02..92db28a9ed43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -32,23 +32,6 @@
#include "en.h"
-struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
-{
- struct mlx5_cqwq *wq = &cq->wq;
- u32 ci = mlx5_cqwq_get_ci(wq);
- struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
- u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
- u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
-
- if (cqe_ownership_bit != sw_ownership_val)
- return NULL;
-
- /* ensure cqe content is read after cqe ownership bit */
- dma_rmb();
-
- return cqe;
-}
-
static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
struct mlx5e_icosq *sq,
struct mlx5_cqe64 *cqe,
@@ -89,7 +72,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
- cqe = mlx5e_get_cqe(cq);
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (likely(!cqe))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b8030b5707a5..95b64025ce36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -903,21 +903,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
- struct mlx5_core_dev *dev;
- u16 cur_mlx5_mode, mlx5_mode = 0;
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
- dev = devlink_priv(devlink);
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return -EOPNOTSUPP;
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
- cur_mlx5_mode = dev->priv.eswitch->mode;
-
- if (cur_mlx5_mode == SRIOV_NONE)
+ if (dev->priv.eswitch->mode == SRIOV_NONE)
return -EOPNOTSUPP;
+ return 0;
+}
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u16 cur_mlx5_mode, mlx5_mode = 0;
+ int err;
+
+ err = mlx5_devlink_eswitch_check(devlink);
+ if (err)
+ return err;
+
+ cur_mlx5_mode = dev->priv.eswitch->mode;
+
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
@@ -934,15 +947,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
- struct mlx5_core_dev *dev;
-
- dev = devlink_priv(devlink);
-
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int err;
- if (dev->priv.eswitch->mode == SRIOV_NONE)
- return -EOPNOTSUPP;
+ err = mlx5_devlink_eswitch_check(devlink);
+ if (err)
+ return err;
return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
@@ -951,15 +961,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
- int num_vports = esw->enabled_vports;
int err, vport;
u8 mlx5_mode;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
-
- if (esw->mode == SRIOV_NONE)
- return -EOPNOTSUPP;
+ err = mlx5_devlink_eswitch_check(devlink);
+ if (err)
+ return err;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -982,7 +989,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
if (err)
goto out;
- for (vport = 1; vport < num_vports; vport++) {
+ for (vport = 1; vport < esw->enabled_vports; vport++) {
err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
if (err) {
esw_warn(dev, "Failed to set min inline on vport %d\n",
@@ -1007,12 +1014,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
-
- if (esw->mode == SRIOV_NONE)
- return -EOPNOTSUPP;
+ err = mlx5_devlink_eswitch_check(devlink);
+ if (err)
+ return err;
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
@@ -1059,11 +1065,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
-
- if (esw->mode == SRIOV_NONE)
- return -EOPNOTSUPP;
+ err = mlx5_devlink_eswitch_check(devlink);
+ if (err)
+ return err;
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
@@ -1102,12 +1106,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
-
- if (esw->mode == SRIOV_NONE)
- return -EOPNOTSUPP;
+ err = mlx5_devlink_eswitch_check(devlink);
+ if (err)
+ return err;
*encap = esw->offloads.encap;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
index 99cba644b4fc..5cb855fd618f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -33,10 +33,44 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
#include "mlx5_core.h"
#include "fpga/cmd.h"
+#define MLX5_FPGA_ACCESS_REG_SZ (MLX5_ST_SZ_DW(fpga_access_reg) + \
+ MLX5_FPGA_ACCESS_REG_SIZE_MAX)
+
+int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
+ void *buf, bool write)
+{
+ u32 in[MLX5_FPGA_ACCESS_REG_SZ] = {0};
+ u32 out[MLX5_FPGA_ACCESS_REG_SZ];
+ int err;
+
+ if (size & 3)
+ return -EINVAL;
+ if (addr & 3)
+ return -EINVAL;
+ if (size > MLX5_FPGA_ACCESS_REG_SIZE_MAX)
+ return -EINVAL;
+
+ MLX5_SET(fpga_access_reg, in, size, size);
+ MLX5_SET64(fpga_access_reg, in, address, addr);
+ if (write)
+ memcpy(MLX5_ADDR_OF(fpga_access_reg, in, data), buf, size);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_FPGA_ACCESS_REG, 0, write);
+ if (err)
+ return err;
+
+ if (!write)
+ memcpy(buf, MLX5_ADDR_OF(fpga_access_reg, out, data), size);
+
+ return 0;
+}
+
int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
{
u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};
@@ -46,6 +80,49 @@ int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
MLX5_REG_FPGA_CAP, 0, 0);
}
+int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_ctrl)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_ctrl)];
+
+ MLX5_SET(fpga_ctrl, in, operation, op);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_FPGA_CTRL, 0, true);
+}
+
+int mlx5_fpga_sbu_caps(struct mlx5_core_dev *dev, void *caps, int size)
+{
+ unsigned int cap_size = MLX5_CAP_FPGA(dev, sandbox_extended_caps_len);
+ u64 addr = MLX5_CAP64_FPGA(dev, sandbox_extended_caps_addr);
+ unsigned int read;
+ int ret = 0;
+
+ if (cap_size > size) {
+ mlx5_core_warn(dev, "Not enough buffer %u for FPGA SBU caps %u",
+ size, cap_size);
+ return -EINVAL;
+ }
+
+ while (cap_size > 0) {
+ read = min_t(unsigned int, cap_size,
+ MLX5_FPGA_ACCESS_REG_SIZE_MAX);
+
+ ret = mlx5_fpga_access_reg(dev, read, addr, caps, false);
+ if (ret) {
+ mlx5_core_warn(dev, "Error reading FPGA SBU caps %u bytes at address 0x%llx: %d",
+ read, addr, ret);
+ return ret;
+ }
+
+ cap_size -= read;
+ addr += read;
+ caps += read;
+ }
+
+ return ret;
+}
+
int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query)
{
u32 in[MLX5_ST_SZ_DW(fpga_ctrl)] = {0};
@@ -62,3 +139,100 @@ int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query)
query->oper_image = MLX5_GET(fpga_ctrl, out, flash_select_oper);
return 0;
}
+
+int mlx5_fpga_create_qp(struct mlx5_core_dev *dev, void *fpga_qpc,
+ u32 *fpga_qpn)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)];
+ int ret;
+
+ MLX5_SET(fpga_create_qp_in, in, opcode, MLX5_CMD_OP_FPGA_CREATE_QP);
+ memcpy(MLX5_ADDR_OF(fpga_create_qp_in, in, fpga_qpc), fpga_qpc,
+ MLX5_FLD_SZ_BYTES(fpga_create_qp_in, fpga_qpc));
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ memcpy(fpga_qpc, MLX5_ADDR_OF(fpga_create_qp_out, out, fpga_qpc),
+ MLX5_FLD_SZ_BYTES(fpga_create_qp_out, fpga_qpc));
+ *fpga_qpn = MLX5_GET(fpga_create_qp_out, out, fpga_qpn);
+ return ret;
+}
+
+int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
+ enum mlx5_fpga_qpc_field_select fields,
+ void *fpga_qpc)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_modify_qp_out)];
+
+ MLX5_SET(fpga_modify_qp_in, in, opcode, MLX5_CMD_OP_FPGA_MODIFY_QP);
+ MLX5_SET(fpga_modify_qp_in, in, field_select, fields);
+ MLX5_SET(fpga_modify_qp_in, in, fpga_qpn, fpga_qpn);
+ memcpy(MLX5_ADDR_OF(fpga_modify_qp_in, in, fpga_qpc), fpga_qpc,
+ MLX5_FLD_SZ_BYTES(fpga_modify_qp_in, fpga_qpc));
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_fpga_query_qp(struct mlx5_core_dev *dev,
+ u32 fpga_qpn, void *fpga_qpc)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)];
+ int ret;
+
+ MLX5_SET(fpga_query_qp_in, in, opcode, MLX5_CMD_OP_FPGA_QUERY_QP);
+ MLX5_SET(fpga_query_qp_in, in, fpga_qpn, fpga_qpn);
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ memcpy(fpga_qpc, MLX5_ADDR_OF(fpga_query_qp_out, in, fpga_qpc),
+ MLX5_FLD_SZ_BYTES(fpga_query_qp_out, fpga_qpc));
+ return ret;
+}
+
+int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_destroy_qp_out)];
+
+ MLX5_SET(fpga_destroy_qp_in, in, opcode, MLX5_CMD_OP_FPGA_DESTROY_QP);
+ MLX5_SET(fpga_destroy_qp_in, in, fpga_qpn, fpga_qpn);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
+ bool clear, struct mlx5_fpga_qp_counters *data)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)];
+ int ret;
+
+ MLX5_SET(fpga_query_qp_counters_in, in, opcode,
+ MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS);
+ MLX5_SET(fpga_query_qp_counters_in, in, clear, clear);
+ MLX5_SET(fpga_query_qp_counters_in, in, fpga_qpn, fpga_qpn);
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ data->rx_ack_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+ rx_ack_packets);
+ data->rx_send_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+ rx_send_packets);
+ data->tx_ack_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+ tx_ack_packets);
+ data->tx_send_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+ tx_send_packets);
+ data->rx_total_drop = MLX5_GET64(fpga_query_qp_counters_out, out,
+ rx_total_drop);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index a74396a61bc3..94bdfd47c3f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -53,7 +53,32 @@ struct mlx5_fpga_query {
enum mlx5_fpga_status status;
};
+enum mlx5_fpga_qpc_field_select {
+ MLX5_FPGA_QPC_STATE = BIT(0),
+};
+
+struct mlx5_fpga_qp_counters {
+ u64 rx_ack_packets;
+ u64 rx_send_packets;
+ u64 tx_ack_packets;
+ u64 tx_send_packets;
+ u64 rx_total_drop;
+};
+
int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
+int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
+int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
+ void *buf, bool write);
+int mlx5_fpga_sbu_caps(struct mlx5_core_dev *dev, void *caps, int size);
+
+int mlx5_fpga_create_qp(struct mlx5_core_dev *dev, void *fpga_qpc,
+ u32 *fpga_qpn);
+int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
+ enum mlx5_fpga_qpc_field_select fields, void *fpga_qpc);
+int mlx5_fpga_query_qp(struct mlx5_core_dev *dev, u32 fpga_qpn, void *fpga_qpc);
+int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
+ bool clear, struct mlx5_fpga_qp_counters *data);
+int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn);
#endif /* __MLX5_FPGA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
new file mode 100644
index 000000000000..c4392f741c5f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -0,0 +1,1042 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <net/addrconf.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/vport.h>
+
+#include "mlx5_core.h"
+#include "lib/mlx5.h"
+#include "fpga/conn.h"
+
+#define MLX5_FPGA_PKEY 0xFFFF
+#define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */
+#define MLX5_FPGA_RECV_SIZE 2048
+#define MLX5_FPGA_PORT_NUM 1
+#define MLX5_FPGA_CQ_BUDGET 64
+
+static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ struct device *dma_device;
+ int err = 0;
+
+ if (unlikely(!buf->sg[0].data))
+ goto out;
+
+ dma_device = &conn->fdev->mdev->pdev->dev;
+ buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
+ buf->sg[0].size, buf->dma_dir);
+ err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
+ if (unlikely(err)) {
+ mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (!buf->sg[1].data)
+ goto out;
+
+ buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data,
+ buf->sg[1].size, buf->dma_dir);
+ err = dma_mapping_error(dma_device, buf->sg[1].dma_addr);
+ if (unlikely(err)) {
+ mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err);
+ dma_unmap_single(dma_device, buf->sg[0].dma_addr,
+ buf->sg[0].size, buf->dma_dir);
+ err = -ENOMEM;
+ }
+
+out:
+ return err;
+}
+
+static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ struct device *dma_device;
+
+ dma_device = &conn->fdev->mdev->pdev->dev;
+ if (buf->sg[1].data)
+ dma_unmap_single(dma_device, buf->sg[1].dma_addr,
+ buf->sg[1].size, buf->dma_dir);
+
+ if (likely(buf->sg[0].data))
+ dma_unmap_single(dma_device, buf->sg[0].dma_addr,
+ buf->sg[0].size, buf->dma_dir);
+}
+
+static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ struct mlx5_wqe_data_seg *data;
+ unsigned int ix;
+ int err = 0;
+
+ err = mlx5_fpga_conn_map_buf(conn, buf);
+ if (unlikely(err))
+ goto out;
+
+ if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
+ mlx5_fpga_conn_unmap_buf(conn, buf);
+ return -EBUSY;
+ }
+
+ ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
+ data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
+ data->byte_count = cpu_to_be32(buf->sg[0].size);
+ data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
+ data->addr = cpu_to_be64(buf->sg[0].dma_addr);
+
+ conn->qp.rq.pc++;
+ conn->qp.rq.bufs[ix] = buf;
+
+ /* Make sure that descriptors are written before doorbell record. */
+ dma_wmb();
+ *conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
+out:
+ return err;
+}
+
+static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+ *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
+ /* Make sure that doorbell record is visible before ringing */
+ wmb();
+ mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
+}
+
+static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ struct mlx5_wqe_data_seg *data;
+ unsigned int ix, sgi;
+ int size = 1;
+
+ ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);
+
+ ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);
+ data = (void *)(ctrl + 1);
+
+ for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) {
+ if (!buf->sg[sgi].data)
+ break;
+ data->byte_count = cpu_to_be32(buf->sg[sgi].size);
+ data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
+ data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
+ data++;
+ size++;
+ }
+
+ ctrl->imm = 0;
+ ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
+ MLX5_OPCODE_SEND);
+ ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));
+
+ conn->qp.sq.pc++;
+ conn->qp.sq.bufs[ix] = buf;
+ mlx5_fpga_conn_notify_hw(conn, ctrl);
+}
+
+int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ unsigned long flags;
+ int err;
+
+ if (!conn->qp.active)
+ return -ENOTCONN;
+
+ err = mlx5_fpga_conn_map_buf(conn, buf);
+ if (err)
+ return err;
+
+ spin_lock_irqsave(&conn->qp.sq.lock, flags);
+
+ if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {
+ list_add_tail(&buf->list, &conn->qp.sq.backlog);
+ goto out_unlock;
+ }
+
+ mlx5_fpga_conn_post_send(conn, buf);
+
+out_unlock:
+ spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
+ return err;
+}
+
+static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_dma_buf *buf;
+ int err;
+
+ buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, 0);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->sg[0].data = (void *)(buf + 1);
+ buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
+ buf->dma_dir = DMA_FROM_DEVICE;
+
+ err = mlx5_fpga_conn_post_recv(conn, buf);
+ if (err)
+ kfree(buf);
+
+ return err;
+}
+
+static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_core_mkey *mkey)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
+static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
+ struct mlx5_cqe64 *cqe, u8 status)
+{
+ struct mlx5_fpga_dma_buf *buf;
+ int ix, err;
+
+ ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
+ buf = conn->qp.rq.bufs[ix];
+ conn->qp.rq.bufs[ix] = NULL;
+ if (!status)
+ buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
+ conn->qp.rq.cc++;
+
+ if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
+ mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
+ buf, conn->fpga_qpn, status);
+ else
+ mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
+ buf, conn->fpga_qpn, status);
+
+ mlx5_fpga_conn_unmap_buf(conn, buf);
+
+ if (unlikely(status || !conn->qp.active)) {
+ conn->qp.active = false;
+ kfree(buf);
+ return;
+ }
+
+ mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
+ buf->sg[0].size);
+ conn->recv_cb(conn->cb_arg, buf);
+
+ buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
+ err = mlx5_fpga_conn_post_recv(conn, buf);
+ if (unlikely(err)) {
+ mlx5_fpga_warn(conn->fdev,
+ "Failed to re-post recv buf: %d\n", err);
+ kfree(buf);
+ }
+}
+
+static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn,
+ struct mlx5_cqe64 *cqe, u8 status)
+{
+ struct mlx5_fpga_dma_buf *buf, *nextbuf;
+ unsigned long flags;
+ int ix;
+
+ spin_lock_irqsave(&conn->qp.sq.lock, flags);
+
+ ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
+ buf = conn->qp.sq.bufs[ix];
+ conn->qp.sq.bufs[ix] = NULL;
+ conn->qp.sq.cc++;
+
+ /* Handle backlog still under the spinlock to ensure message post order */
+ if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
+ if (likely(conn->qp.active)) {
+ nextbuf = list_first_entry(&conn->qp.sq.backlog,
+ struct mlx5_fpga_dma_buf, list);
+ list_del(&nextbuf->list);
+ mlx5_fpga_conn_post_send(conn, nextbuf);
+ }
+ }
+
+ spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
+
+ if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
+ mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
+ buf, conn->fpga_qpn, status);
+ else
+ mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
+ buf, conn->fpga_qpn, status);
+
+ mlx5_fpga_conn_unmap_buf(conn, buf);
+
+ if (likely(buf->complete))
+ buf->complete(conn, conn->fdev, buf, status);
+
+ if (unlikely(status))
+ conn->qp.active = false;
+}
+
+static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
+ struct mlx5_cqe64 *cqe)
+{
+ u8 opcode, status = 0;
+
+ opcode = cqe->op_own >> 4;
+
+ switch (opcode) {
+ case MLX5_CQE_REQ_ERR:
+ status = ((struct mlx5_err_cqe *)cqe)->syndrome;
+ /* Fall through */
+ case MLX5_CQE_REQ:
+ mlx5_fpga_conn_sq_cqe(conn, cqe, status);
+ break;
+
+ case MLX5_CQE_RESP_ERR:
+ status = ((struct mlx5_err_cqe *)cqe)->syndrome;
+ /* Fall through */
+ case MLX5_CQE_RESP_SEND:
+ mlx5_fpga_conn_rq_cqe(conn, cqe, status);
+ break;
+ default:
+ mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n",
+ opcode);
+ }
+}
+
+static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
+{
+ mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
+ conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
+}
+
+static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
+ enum mlx5_event event)
+{
+ struct mlx5_fpga_conn *conn;
+
+ conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
+ mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
+}
+
+static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
+{
+ struct mlx5_fpga_conn *conn;
+
+ conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
+ mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
+}
+
+static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
+ unsigned int budget)
+{
+ struct mlx5_cqe64 *cqe;
+
+ while (budget) {
+ cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
+ if (!cqe)
+ break;
+
+ budget--;
+ mlx5_cqwq_pop(&conn->cq.wq);
+ mlx5_fpga_conn_handle_cqe(conn, cqe);
+ mlx5_cqwq_update_db_record(&conn->cq.wq);
+ }
+ if (!budget) {
+ tasklet_schedule(&conn->cq.tasklet);
+ return;
+ }
+
+ mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+ mlx5_fpga_conn_arm_cq(conn);
+}
+
+static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
+{
+ struct mlx5_fpga_conn *conn = (void *)data;
+
+ if (unlikely(!conn->qp.active))
+ return;
+ mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
+}
+
+static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)
+{
+ struct mlx5_fpga_conn *conn;
+
+ conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
+ if (unlikely(!conn->qp.active))
+ return;
+ mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
+}
+
+static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
+ struct mlx5_wq_param wqp;
+ struct mlx5_cqe64 *cqe;
+ int inlen, err, eqn;
+ unsigned int irqn;
+ void *cqc, *in;
+ __be64 *pas;
+ u32 i;
+
+ cq_size = roundup_pow_of_two(cq_size);
+ MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
+
+ wqp.buf_numa_node = mdev->priv.numa_node;
+ wqp.db_numa_node = mdev->priv.numa_node;
+
+ err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq,
+ &conn->cq.wq_ctrl);
+ if (err)
+ return err;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) {
+ cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i);
+ cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_cqwq;
+ }
+
+ err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+ if (err)
+ goto err_cqwq;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);
+
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+ mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas);
+
+ err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
+ kvfree(in);
+
+ if (err)
+ goto err_cqwq;
+
+ conn->cq.mcq.cqe_sz = 64;
+ conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
+ conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
+ *conn->cq.mcq.set_ci_db = 0;
+ *conn->cq.mcq.arm_db = 0;
+ conn->cq.mcq.vector = 0;
+ conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
+ conn->cq.mcq.event = mlx5_fpga_conn_cq_event;
+ conn->cq.mcq.irqn = irqn;
+ conn->cq.mcq.uar = fdev->conn_res.uar;
+ tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
+ (unsigned long)conn);
+
+ mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
+
+ goto out;
+
+err_cqwq:
+ mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
+out:
+ return err;
+}
+
+static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
+{
+ tasklet_disable(&conn->cq.tasklet);
+ tasklet_kill(&conn->cq.tasklet);
+ mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
+ mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
+}
+
+static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ struct mlx5_wq_param wqp;
+
+ wqp.buf_numa_node = mdev->priv.numa_node;
+ wqp.db_numa_node = mdev->priv.numa_node;
+
+ return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq,
+ &conn->qp.wq_ctrl);
+}
+
+static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
+ unsigned int tx_size, unsigned int rx_size)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0};
+ void *in = NULL, *qpc;
+ int err, inlen;
+
+ conn->qp.rq.pc = 0;
+ conn->qp.rq.cc = 0;
+ conn->qp.rq.size = roundup_pow_of_two(rx_size);
+ conn->qp.sq.pc = 0;
+ conn->qp.sq.cc = 0;
+ conn->qp.sq.size = roundup_pow_of_two(tx_size);
+
+ MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+ MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
+ MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
+ err = mlx5_fpga_conn_create_wq(conn, temp_qpc);
+ if (err)
+ goto out;
+
+ conn->qp.rq.bufs = kvzalloc(sizeof(conn->qp.rq.bufs[0]) *
+ conn->qp.rq.size, GFP_KERNEL);
+ if (!conn->qp.rq.bufs) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ conn->qp.sq.bufs = kvzalloc(sizeof(conn->qp.sq.bufs[0]) *
+ conn->qp.sq.size, GFP_KERNEL);
+ if (!conn->qp.sq.bufs) {
+ err = -ENOMEM;
+ goto err_rq_bufs;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
+ conn->qp.wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_sq_bufs;
+ }
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index);
+ MLX5_SET(qpc, qpc, log_page_size,
+ conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, fre, 1);
+ MLX5_SET(qpc, qpc, rlky, 1);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn);
+ MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
+ MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
+ MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
+ MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
+ if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
+ MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+
+ mlx5_fill_page_array(&conn->qp.wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
+
+ err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
+ if (err)
+ goto err_sq_bufs;
+
+ conn->qp.mqp.event = mlx5_fpga_conn_event;
+ mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);
+
+ goto out;
+
+err_sq_bufs:
+ kvfree(conn->qp.sq.bufs);
+err_rq_bufs:
+ kvfree(conn->qp.rq.bufs);
+err_wq:
+ mlx5_wq_destroy(&conn->qp.wq_ctrl);
+out:
+ kvfree(in);
+ return err;
+}
+
+static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn)
+{
+ int ix;
+
+ for (ix = 0; ix < conn->qp.rq.size; ix++) {
+ if (!conn->qp.rq.bufs[ix])
+ continue;
+ mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
+ kfree(conn->qp.rq.bufs[ix]);
+ conn->qp.rq.bufs[ix] = NULL;
+ }
+}
+
+static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_dma_buf *buf, *temp;
+ int ix;
+
+ for (ix = 0; ix < conn->qp.sq.size; ix++) {
+ buf = conn->qp.sq.bufs[ix];
+ if (!buf)
+ continue;
+ conn->qp.sq.bufs[ix] = NULL;
+ mlx5_fpga_conn_unmap_buf(conn, buf);
+ if (!buf->complete)
+ continue;
+ buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
+ }
+ list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
+ mlx5_fpga_conn_unmap_buf(conn, buf);
+ if (!buf->complete)
+ continue;
+ buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
+ }
+}
+
+static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
+{
+ mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
+ mlx5_fpga_conn_free_recv_bufs(conn);
+ mlx5_fpga_conn_flush_send_bufs(conn);
+ kvfree(conn->qp.sq.bufs);
+ kvfree(conn->qp.rq.bufs);
+ mlx5_wq_destroy(&conn->qp.wq_ctrl);
+}
+
+static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_core_dev *mdev = conn->fdev->mdev;
+
+ mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);
+
+ return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
+ &conn->qp.mqp);
+}
+
+static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ u32 *qpc = NULL;
+ int err;
+
+ mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);
+
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
+ MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
+ MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
+ MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
+
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
+ &conn->qp.mqp);
+ if (err) {
+ mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
+ goto out;
+ }
+
+out:
+ kfree(qpc);
+ return err;
+}
+
+static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ u32 *qpc = NULL;
+ int err;
+
+ mlx5_fpga_dbg(conn->fdev, "QP RTR\n");
+
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
+ MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
+ MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn);
+ MLX5_SET(qpc, qpc, next_rcv_psn,
+ MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
+ MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
+ MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
+ MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
+ MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
+ MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port));
+ MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
+ conn->qp.sgid_index);
+ MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0);
+ memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
+ MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
+ MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));
+
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
+ &conn->qp.mqp);
+ if (err) {
+ mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
+ goto out;
+ }
+
+out:
+ kfree(qpc);
+ return err;
+}
+
+static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ u32 *qpc = NULL;
+ u32 opt_mask;
+ int err;
+
+ mlx5_fpga_dbg(conn->fdev, "QP RTS\n");
+
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
+ MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
+ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */
+ MLX5_SET(qpc, qpc, next_send_psn,
+ MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn));
+ MLX5_SET(qpc, qpc, retry_count, 7);
+ MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
+
+ opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
+ &conn->qp.mqp);
+ if (err) {
+ mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
+ goto out;
+ }
+
+out:
+ kfree(qpc);
+ return err;
+}
+
+static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ int err;
+
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE);
+ err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
+ MLX5_FPGA_QPC_STATE, &conn->fpga_qpc);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to activate FPGA RC QP: %d\n", err);
+ goto out;
+ }
+
+ err = mlx5_fpga_conn_reset_qp(conn);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to change QP state to reset\n");
+ goto err_fpga_qp;
+ }
+
+ err = mlx5_fpga_conn_init_qp(conn);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n");
+ goto err_fpga_qp;
+ }
+ conn->qp.active = true;
+
+ while (!mlx5_fpga_conn_post_recv_buf(conn))
+ ;
+
+ err = mlx5_fpga_conn_rtr_qp(conn);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n");
+ goto err_recv_bufs;
+ }
+
+ err = mlx5_fpga_conn_rts_qp(conn);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n");
+ goto err_recv_bufs;
+ }
+ goto out;
+
+err_recv_bufs:
+ mlx5_fpga_conn_free_recv_bufs(conn);
+err_fpga_qp:
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
+ if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
+ MLX5_FPGA_QPC_STATE, &conn->fpga_qpc))
+ mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n");
+out:
+ return err;
+}
+
+struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_conn_attr *attr,
+ enum mlx5_ifc_fpga_qp_type qp_type)
+{
+ struct mlx5_fpga_conn *ret, *conn;
+ u8 *remote_mac, *remote_ip;
+ int err;
+
+ if (!attr->recv_cb)
+ return ERR_PTR(-EINVAL);
+
+ conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+
+ conn->fdev = fdev;
+ INIT_LIST_HEAD(&conn->qp.sq.backlog);
+
+ spin_lock_init(&conn->qp.sq.lock);
+
+ conn->recv_cb = attr->recv_cb;
+ conn->cb_arg = attr->cb_arg;
+
+ remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
+ err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err;
+ }
+
+ /* Build Modified EUI-64 IPv6 address from the MAC address */
+ remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip);
+ remote_ip[0] = 0xfe;
+ remote_ip[1] = 0x80;
+ addrconf_addr_eui48(&remote_ip[8], remote_mac);
+
+ err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err;
+ }
+
+ err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
+ MLX5_ROCE_VERSION_2,
+ MLX5_ROCE_L3_TYPE_IPV6,
+ remote_ip, remote_mac, true, 0);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err_rsvd_gid;
+ }
+ mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index);
+
+ /* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe,
+ * created during processing of the cqe
+ */
+ err = mlx5_fpga_conn_create_cq(conn,
+ (attr->tx_size + attr->rx_size) * 2);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err_gid;
+ }
+
+ mlx5_fpga_conn_arm_cq(conn);
+
+ err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err_cq;
+ }
+
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);
+
+ err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc,
+ &conn->fpga_qpn);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err_qp;
+ }
+
+ err = mlx5_fpga_conn_connect(conn);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_conn;
+ }
+
+ mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn);
+ ret = conn;
+ goto out;
+
+err_conn:
+ mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
+err_qp:
+ mlx5_fpga_conn_destroy_qp(conn);
+err_cq:
+ mlx5_fpga_conn_destroy_cq(conn);
+err_gid:
+ mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
+ NULL, false, 0);
+err_rsvd_gid:
+ mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
+err:
+ kfree(conn);
+out:
+ return ret;
+}
+
+void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ int err = 0;
+
+ conn->qp.active = false;
+ tasklet_disable(&conn->cq.tasklet);
+ synchronize_irq(conn->cq.mcq.irqn);
+
+ mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
+ &conn->qp.mqp);
+ if (err)
+ mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err);
+ mlx5_fpga_conn_destroy_qp(conn);
+ mlx5_fpga_conn_destroy_cq(conn);
+
+ mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
+ NULL, NULL, false, 0);
+ mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
+ kfree(conn);
+}
+
+int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
+{
+ int err;
+
+ err = mlx5_nic_vport_enable_roce(fdev->mdev);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err);
+ goto out;
+ }
+
+ fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev);
+ if (IS_ERR(fdev->conn_res.uar)) {
+ err = PTR_ERR(fdev->conn_res.uar);
+ mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err);
+ goto err_roce;
+ }
+ mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n",
+ fdev->conn_res.uar->index);
+
+ err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn);
+ if (err) {
+ mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err);
+ goto err_uar;
+ }
+ mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn);
+
+ err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn,
+ &fdev->conn_res.mkey);
+ if (err) {
+ mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
+ goto err_dealloc_pd;
+ }
+ mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key);
+
+ return 0;
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
+err_uar:
+ mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
+err_roce:
+ mlx5_nic_vport_disable_roce(fdev->mdev);
+out:
+ return err;
+}
+
+void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
+{
+ mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
+ mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
+ mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
+ mlx5_nic_vport_disable_roce(fdev->mdev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
new file mode 100644
index 000000000000..44bd9eccc711
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_FPGA_CONN_H__
+#define __MLX5_FPGA_CONN_H__
+
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/qp.h>
+
+#include "fpga/core.h"
+#include "fpga/sdk.h"
+#include "wq.h"
+
+struct mlx5_fpga_conn {
+ struct mlx5_fpga_device *fdev;
+
+ void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf);
+ void *cb_arg;
+
+ /* FPGA QP */
+ u32 fpga_qpc[MLX5_ST_SZ_DW(fpga_qpc)];
+ u32 fpga_qpn;
+
+ /* CQ */
+ struct {
+ struct mlx5_cqwq wq;
+ struct mlx5_frag_wq_ctrl wq_ctrl;
+ struct mlx5_core_cq mcq;
+ struct tasklet_struct tasklet;
+ } cq;
+
+ /* QP */
+ struct {
+ bool active;
+ int sgid_index;
+ struct mlx5_wq_qp wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_qp mqp;
+ struct {
+ spinlock_t lock; /* Protects all SQ state */
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int size;
+ struct mlx5_fpga_dma_buf **bufs;
+ struct list_head backlog;
+ } sq;
+ struct {
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int size;
+ struct mlx5_fpga_dma_buf **bufs;
+ } rq;
+ } qp;
+};
+
+int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev);
+void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev);
+struct mlx5_fpga_conn *
+mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_conn_attr *attr,
+ enum mlx5_ifc_fpga_qp_type qp_type);
+void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn);
+int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf);
+
+#endif /* __MLX5_FPGA_CONN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index d88b332e9669..31e5a2627eb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -35,7 +35,9 @@
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
+#include "lib/mlx5.h"
#include "fpga/core.h"
+#include "fpga/conn.h"
static const char *const mlx5_fpga_error_strings[] = {
"Null Syndrome",
@@ -100,10 +102,34 @@ static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
return 0;
}
+int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
+{
+ int err;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+
+ err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to set bypass on: %d\n", err);
+ return err;
+ }
+ err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_RESET_SANDBOX);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to reset SBU: %d\n", err);
+ return err;
+ }
+ err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_OFF);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to set bypass off: %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned long flags;
+ unsigned int max_num_qps;
int err;
if (!fdev)
@@ -123,6 +149,28 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
mlx5_fpga_image_name(fdev->last_oper_image),
MLX5_CAP_FPGA(fdev->mdev, image_version));
+ max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
+ err = mlx5_core_reserve_gids(mdev, max_num_qps);
+ if (err)
+ goto out;
+
+ err = mlx5_fpga_conn_device_init(fdev);
+ if (err)
+ goto err_rsvd_gid;
+
+ if (fdev->last_oper_image == MLX5_FPGA_IMAGE_USER) {
+ err = mlx5_fpga_device_brb(fdev);
+ if (err)
+ goto err_conn_init;
+ }
+
+ goto out;
+
+err_conn_init:
+ mlx5_fpga_conn_device_cleanup(fdev);
+
+err_rsvd_gid:
+ mlx5_core_unreserve_gids(mdev, max_num_qps);
out:
spin_lock_irqsave(&fdev->state_lock, flags);
fdev->state = err ? MLX5_FPGA_STATUS_FAILURE : MLX5_FPGA_STATUS_SUCCESS;
@@ -130,7 +178,7 @@ out:
return err;
}
-int mlx5_fpga_device_init(struct mlx5_core_dev *mdev)
+int mlx5_fpga_init(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = NULL;
@@ -151,9 +199,42 @@ int mlx5_fpga_device_init(struct mlx5_core_dev *mdev)
return 0;
}
-void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev)
+void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ unsigned int max_num_qps;
+ unsigned long flags;
+ int err;
+
+ if (!fdev)
+ return;
+
+ spin_lock_irqsave(&fdev->state_lock, flags);
+ if (fdev->state != MLX5_FPGA_STATUS_SUCCESS) {
+ spin_unlock_irqrestore(&fdev->state_lock, flags);
+ return;
+ }
+ fdev->state = MLX5_FPGA_STATUS_NONE;
+ spin_unlock_irqrestore(&fdev->state_lock, flags);
+
+ if (fdev->last_oper_image == MLX5_FPGA_IMAGE_USER) {
+ err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON);
+ if (err)
+ mlx5_fpga_err(fdev, "Failed to re-set SBU bypass on: %d\n",
+ err);
+ }
+
+ mlx5_fpga_conn_device_cleanup(fdev);
+ max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
+ mlx5_core_unreserve_gids(mdev, max_num_qps);
+}
+
+void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev)
{
- kfree(mdev->fpga);
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+
+ mlx5_fpga_device_stop(mdev);
+ kfree(fdev);
mdev->fpga = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index c55044d66778..82405ed84725 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -44,6 +44,15 @@ struct mlx5_fpga_device {
enum mlx5_fpga_status state;
enum mlx5_fpga_image last_admin_image;
enum mlx5_fpga_image last_oper_image;
+
+ /* QP Connection resources */
+ struct {
+ u32 pdn;
+ struct mlx5_core_mkey mkey;
+ struct mlx5_uars_page *uar;
+ } conn_res;
+
+ struct mlx5_fpga_ipsec *ipsec;
};
#define mlx5_fpga_dbg(__adev, format, ...) \
@@ -68,19 +77,20 @@ struct mlx5_fpga_device {
#define mlx5_fpga_info(__adev, format, ...) \
dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
-int mlx5_fpga_device_init(struct mlx5_core_dev *mdev);
-void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev);
+int mlx5_fpga_init(struct mlx5_core_dev *mdev);
+void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
int mlx5_fpga_device_start(struct mlx5_core_dev *mdev);
+void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev);
void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data);
#else
-static inline int mlx5_fpga_device_init(struct mlx5_core_dev *mdev)
+static inline int mlx5_fpga_init(struct mlx5_core_dev *mdev)
{
return 0;
}
-static inline void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev)
+static inline void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev)
{
}
@@ -89,6 +99,10 @@ static inline int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
return 0;
}
+static inline void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
+{
+}
+
static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event,
void *data)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
new file mode 100644
index 000000000000..42970e2a05ff
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "fpga/ipsec.h"
+#include "fpga/sdk.h"
+#include "fpga/core.h"
+
+#define SBU_QP_QUEUE_SIZE 8
+
+enum mlx5_ipsec_response_syndrome {
+ MLX5_IPSEC_RESPONSE_SUCCESS = 0,
+ MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
+ MLX5_IPSEC_RESPONSE_SADB_ISSUE = 2,
+ MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
+};
+
+enum mlx5_fpga_ipsec_sacmd_status {
+ MLX5_FPGA_IPSEC_SACMD_PENDING,
+ MLX5_FPGA_IPSEC_SACMD_SEND_FAIL,
+ MLX5_FPGA_IPSEC_SACMD_COMPLETE,
+};
+
+struct mlx5_ipsec_command_context {
+ struct mlx5_fpga_dma_buf buf;
+ struct mlx5_accel_ipsec_sa sa;
+ enum mlx5_fpga_ipsec_sacmd_status status;
+ int status_code;
+ struct completion complete;
+ struct mlx5_fpga_device *dev;
+ struct list_head list; /* Item in pending_cmds */
+};
+
+struct mlx5_ipsec_sadb_resp {
+ __be32 syndrome;
+ __be32 sw_sa_handle;
+ u8 reserved[24];
+} __packed;
+
+struct mlx5_fpga_ipsec {
+ struct list_head pending_cmds;
+ spinlock_t pending_cmds_lock; /* Protects pending_cmds */
+ u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
+ struct mlx5_fpga_conn *conn;
+};
+
+static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
+{
+ if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
+ return false;
+
+ if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
+ MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
+ return false;
+
+ if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
+ MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
+ return false;
+
+ return true;
+}
+
+static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *buf,
+ u8 status)
+{
+ struct mlx5_ipsec_command_context *context;
+
+ if (status) {
+ context = container_of(buf, struct mlx5_ipsec_command_context,
+ buf);
+ mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
+ status);
+ context->status = MLX5_FPGA_IPSEC_SACMD_SEND_FAIL;
+ complete(&context->complete);
+ }
+}
+
+static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome)
+{
+ switch (syndrome) {
+ case MLX5_IPSEC_RESPONSE_SUCCESS:
+ return 0;
+ case MLX5_IPSEC_RESPONSE_SADB_ISSUE:
+ return -EEXIST;
+ case MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST:
+ return -EINVAL;
+ case MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
+ return -EIO;
+ }
+ return -EIO;
+}
+
+static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
+{
+ struct mlx5_ipsec_sadb_resp *resp = buf->sg[0].data;
+ struct mlx5_ipsec_command_context *context;
+ enum mlx5_ipsec_response_syndrome syndrome;
+ struct mlx5_fpga_device *fdev = cb_arg;
+ unsigned long flags;
+
+ if (buf->sg[0].size < sizeof(*resp)) {
+ mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
+ buf->sg[0].size, sizeof(*resp));
+ return;
+ }
+
+ mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x sa_id %x\n",
+ ntohl(resp->syndrome), ntohl(resp->sw_sa_handle));
+
+ spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
+ context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
+ struct mlx5_ipsec_command_context,
+ list);
+ if (context)
+ list_del(&context->list);
+ spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+
+ if (!context) {
+ mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
+ return;
+ }
+ mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);
+
+ if (context->sa.sw_sa_handle != resp->sw_sa_handle) {
+ mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
+ ntohl(context->sa.sw_sa_handle),
+ ntohl(resp->sw_sa_handle));
+ return;
+ }
+
+ syndrome = ntohl(resp->syndrome);
+ context->status_code = syndrome_to_errno(syndrome);
+ context->status = MLX5_FPGA_IPSEC_SACMD_COMPLETE;
+
+ if (context->status_code)
+ mlx5_fpga_warn(fdev, "IPSec SADB command failed with syndrome %08x\n",
+ syndrome);
+ complete(&context->complete);
+}
+
+void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_ipsec_sa *cmd)
+{
+ struct mlx5_ipsec_command_context *context;
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ unsigned long flags;
+ int res = 0;
+
+ BUILD_BUG_ON((sizeof(struct mlx5_accel_ipsec_sa) & 3) != 0);
+ if (!fdev || !fdev->ipsec)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ context = kzalloc(sizeof(*context), GFP_ATOMIC);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&context->sa, cmd, sizeof(*cmd));
+ context->buf.complete = mlx5_fpga_ipsec_send_complete;
+ context->buf.sg[0].size = sizeof(context->sa);
+ context->buf.sg[0].data = &context->sa;
+ init_completion(&context->complete);
+ context->dev = fdev;
+ spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
+ list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
+ spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+
+ context->status = MLX5_FPGA_IPSEC_SACMD_PENDING;
+
+ res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
+ if (res) {
+ mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
+ res);
+ spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
+ list_del(&context->list);
+ spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+ kfree(context);
+ return ERR_PTR(res);
+ }
+ /* Context will be freed by wait func after completion */
+ return context;
+}
+
+int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx)
+{
+ struct mlx5_ipsec_command_context *context = ctx;
+ int res;
+
+ res = wait_for_completion_killable(&context->complete);
+ if (res) {
+ mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
+ return -EINTR;
+ }
+
+ if (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE)
+ res = context->status_code;
+ else
+ res = -EIO;
+
+ kfree(context);
+ return res;
+}
+
+u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ u32 ret = 0;
+
+ if (mlx5_fpga_is_ipsec_device(mdev))
+ ret |= MLX5_ACCEL_IPSEC_DEVICE;
+ else
+ return ret;
+
+ if (!fdev->ipsec)
+ return ret;
+
+ if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
+ ret |= MLX5_ACCEL_IPSEC_ESP;
+
+ if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
+ ret |= MLX5_ACCEL_IPSEC_IPV6;
+
+ if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
+ ret |= MLX5_ACCEL_IPSEC_LSO;
+
+ return ret;
+}
+
+unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+
+ if (!fdev || !fdev->ipsec)
+ return 0;
+
+ return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
+ number_of_ipsec_counters);
+}
+
+int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
+ unsigned int counters_count)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ unsigned int i;
+ u32 *data;
+ u32 count;
+ u64 addr;
+ int ret;
+
+ if (!fdev || !fdev->ipsec)
+ return 0;
+
+ addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
+ ipsec_counters_addr_low) +
+ ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
+ ipsec_counters_addr_high) << 32);
+
+ count = mlx5_fpga_ipsec_counters_count(mdev);
+
+ data = kzalloc(sizeof(u32) * count * 2, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
+ MLX5_FPGA_ACCESS_TYPE_DONTCARE);
+ if (ret < 0) {
+ mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
+ ret);
+ goto out;
+ }
+ ret = 0;
+
+ if (count > counters_count)
+ count = counters_count;
+
+ /* Each counter is low word, then high. But each word is big-endian */
+ for (i = 0; i < count; i++)
+ counters[i] = (u64)ntohl(data[i * 2]) |
+ ((u64)ntohl(data[i * 2 + 1]) << 32);
+
+out:
+ kfree(data);
+ return ret;
+}
+
+int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_conn_attr init_attr = {0};
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ struct mlx5_fpga_conn *conn;
+ int err;
+
+ if (!mlx5_fpga_is_ipsec_device(mdev))
+ return 0;
+
+ fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
+ if (!fdev->ipsec)
+ return -ENOMEM;
+
+ err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
+ fdev->ipsec->caps);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
+ err);
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
+ spin_lock_init(&fdev->ipsec->pending_cmds_lock);
+
+ init_attr.rx_size = SBU_QP_QUEUE_SIZE;
+ init_attr.tx_size = SBU_QP_QUEUE_SIZE;
+ init_attr.recv_cb = mlx5_fpga_ipsec_recv;
+ init_attr.cb_arg = fdev;
+ conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
+ if (IS_ERR(conn)) {
+ err = PTR_ERR(conn);
+ mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
+ err);
+ goto error;
+ }
+ fdev->ipsec->conn = conn;
+ return 0;
+
+error:
+ kfree(fdev->ipsec);
+ fdev->ipsec = NULL;
+ return err;
+}
+
+void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+
+ if (!mlx5_fpga_is_ipsec_device(mdev))
+ return;
+
+ mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
+ kfree(fdev->ipsec);
+ fdev->ipsec = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
new file mode 100644
index 000000000000..26a3e4b56972
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_FPGA_IPSEC_H__
+#define __MLX5_FPGA_IPSEC_H__
+
+#include "accel/ipsec.h"
+
+#ifdef CONFIG_MLX5_FPGA
+
+void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_ipsec_sa *cmd);
+int mlx5_fpga_ipsec_sa_cmd_wait(void *context);
+
+u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
+unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
+int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
+ unsigned int counters_count);
+
+int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
+void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev);
+
+#else
+
+static inline void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_ipsec_sa *cmd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int mlx5_fpga_ipsec_sa_cmd_wait(void *context)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline unsigned int
+mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev,
+ u64 *counters)
+{
+ return 0;
+}
+
+static inline int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
+{
+}
+
+#endif /* CONFIG_MLX5_FPGA */
+
+#endif /* __MLX5_FPGA_SADB_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
new file mode 100644
index 000000000000..3c11d6e2160a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx5/device.h>
+
+#include "fpga/core.h"
+#include "fpga/conn.h"
+#include "fpga/sdk.h"
+
+struct mlx5_fpga_conn *
+mlx5_fpga_sbu_conn_create(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_conn_attr *attr)
+{
+ return mlx5_fpga_conn_create(fdev, attr, MLX5_FPGA_QPC_QP_TYPE_SANDBOX_QP);
+}
+EXPORT_SYMBOL(mlx5_fpga_sbu_conn_create);
+
+void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn)
+{
+ mlx5_fpga_conn_destroy(conn);
+}
+EXPORT_SYMBOL(mlx5_fpga_sbu_conn_destroy);
+
+int mlx5_fpga_sbu_conn_sendmsg(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ return mlx5_fpga_conn_send(conn, buf);
+}
+EXPORT_SYMBOL(mlx5_fpga_sbu_conn_sendmsg);
+
+static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
+ u64 addr, u8 *buf)
+{
+ size_t max_size = MLX5_FPGA_ACCESS_REG_SIZE_MAX;
+ size_t bytes_done = 0;
+ u8 actual_size;
+ int err;
+
+ if (!fdev->mdev)
+ return -ENOTCONN;
+
+ while (bytes_done < size) {
+ actual_size = min(max_size, (size - bytes_done));
+
+ err = mlx5_fpga_access_reg(fdev->mdev, actual_size,
+ addr + bytes_done,
+ buf + bytes_done, false);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to read over I2C: %d\n",
+ err);
+ break;
+ }
+
+ bytes_done += actual_size;
+ }
+
+ return err;
+}
+
+static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
+ u64 addr, u8 *buf)
+{
+ size_t max_size = MLX5_FPGA_ACCESS_REG_SIZE_MAX;
+ size_t bytes_done = 0;
+ u8 actual_size;
+ int err;
+
+ if (!fdev->mdev)
+ return -ENOTCONN;
+
+ while (bytes_done < size) {
+ actual_size = min(max_size, (size - bytes_done));
+
+ err = mlx5_fpga_access_reg(fdev->mdev, actual_size,
+ addr + bytes_done,
+ buf + bytes_done, true);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to write FPGA crspace\n");
+ break;
+ }
+
+ bytes_done += actual_size;
+ }
+
+ return err;
+}
+
+int mlx5_fpga_mem_read(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
+ void *buf, enum mlx5_fpga_access_type access_type)
+{
+ int ret;
+
+ switch (access_type) {
+ case MLX5_FPGA_ACCESS_TYPE_I2C:
+ ret = mlx5_fpga_mem_read_i2c(fdev, size, addr, buf);
+ if (ret)
+ return ret;
+ break;
+ default:
+ mlx5_fpga_warn(fdev, "Unexpected read access_type %u\n",
+ access_type);
+ return -EACCES;
+ }
+
+ return size;
+}
+EXPORT_SYMBOL(mlx5_fpga_mem_read);
+
+int mlx5_fpga_mem_write(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
+ void *buf, enum mlx5_fpga_access_type access_type)
+{
+ int ret;
+
+ switch (access_type) {
+ case MLX5_FPGA_ACCESS_TYPE_I2C:
+ ret = mlx5_fpga_mem_write_i2c(fdev, size, addr, buf);
+ if (ret)
+ return ret;
+ break;
+ default:
+ mlx5_fpga_warn(fdev, "Unexpected write access_type %u\n",
+ access_type);
+ return -EACCES;
+ }
+
+ return size;
+}
+EXPORT_SYMBOL(mlx5_fpga_mem_write);
+
+int mlx5_fpga_get_sbu_caps(struct mlx5_fpga_device *fdev, int size, void *buf)
+{
+ return mlx5_fpga_sbu_caps(fdev->mdev, buf, size);
+}
+EXPORT_SYMBOL(mlx5_fpga_get_sbu_caps);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
new file mode 100644
index 000000000000..baa537e54a49
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef MLX5_FPGA_SDK_H
+#define MLX5_FPGA_SDK_H
+
+#include <linux/types.h>
+#include <linux/dma-direction.h>
+
+/**
+ * DOC: Innova SDK
+ * This header defines the in-kernel API for Innova FPGA client drivers.
+ */
+
+enum mlx5_fpga_access_type {
+ MLX5_FPGA_ACCESS_TYPE_I2C = 0x0,
+ MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0,
+};
+
+struct mlx5_fpga_conn;
+struct mlx5_fpga_device;
+
+/**
+ * struct mlx5_fpga_dma_entry - A scatter-gather DMA entry
+ */
+struct mlx5_fpga_dma_entry {
+ /** @data: Virtual address pointer to the data */
+ void *data;
+ /** @size: Size in bytes of the data */
+ unsigned int size;
+ /** @dma_addr: Private member. Physical DMA-mapped address of the data */
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct mlx5_fpga_dma_buf - A packet buffer
+ * May contain up to 2 scatter-gather data entries
+ */
+struct mlx5_fpga_dma_buf {
+ /** @dma_dir: DMA direction */
+ enum dma_data_direction dma_dir;
+ /** @sg: Scatter-gather entries pointing to the data in memory */
+ struct mlx5_fpga_dma_entry sg[2];
+ /** @list: Item in SQ backlog, for TX packets */
+ struct list_head list;
+ /**
+ * @complete: Completion routine, for TX packets
+ * @conn: FPGA Connection this packet was sent to
+ * @fdev: FPGA device this packet was sent to
+ * @buf: The packet buffer
+ * @status: 0 if successful, or an error code otherwise
+ */
+ void (*complete)(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *buf, u8 status);
+};
+
+/**
+ * struct mlx5_fpga_conn_attr - FPGA connection attributes
+ * Describes the attributes of a connection
+ */
+struct mlx5_fpga_conn_attr {
+ /** @tx_size: Size of connection TX queue, in packets */
+ unsigned int tx_size;
+ /** @rx_size: Size of connection RX queue, in packets */
+ unsigned int rx_size;
+ /**
+ * @recv_cb: Callback function which is called for received packets
+ * @cb_arg: The value provided in mlx5_fpga_conn_attr.cb_arg
+ * @buf: A buffer containing a received packet
+ *
+ * buf is guaranteed to only contain a single scatter-gather entry.
+ * The size of the actual packet received is specified in buf.sg[0].size
+ * When this callback returns, the packet buffer may be re-used for
+ * subsequent receives.
+ */
+ void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf);
+ void *cb_arg;
+};
+
+/**
+ * mlx5_fpga_sbu_conn_create() - Initialize a new FPGA SBU connection
+ * @fdev: The FPGA device
+ * @attr: Attributes of the new connection
+ *
+ * Sets up a new FPGA SBU connection with the specified attributes.
+ * The receive callback function may be called for incoming messages even
+ * before this function returns.
+ *
+ * The caller must eventually destroy the connection by calling
+ * mlx5_fpga_sbu_conn_destroy.
+ *
+ * Return: A new connection, or ERR_PTR() error value otherwise.
+ */
+struct mlx5_fpga_conn *
+mlx5_fpga_sbu_conn_create(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_conn_attr *attr);
+
+/**
+ * mlx5_fpga_sbu_conn_destroy() - Destroy an FPGA SBU connection
+ * @conn: The FPGA SBU connection to destroy
+ *
+ * Cleans up an FPGA SBU connection which was previously created with
+ * mlx5_fpga_sbu_conn_create.
+ */
+void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn);
+
+/**
+ * mlx5_fpga_sbu_conn_sendmsg() - Queue the transmission of a packet
+ * @fdev: An FPGA SBU connection
+ * @buf: The packet buffer
+ *
+ * Queues a packet for transmission over an FPGA SBU connection.
+ * The buffer should not be modified or freed until completion.
+ * Upon completion, the buf's complete() callback is invoked, indicating the
+ * success or error status of the transmission.
+ *
+ * Return: 0 if successful, or an error value otherwise.
+ */
+int mlx5_fpga_sbu_conn_sendmsg(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf);
+
+/**
+ * mlx5_fpga_mem_read() - Read from FPGA memory address space
+ * @fdev: The FPGA device
+ * @size: Size of chunk to read, in bytes
+ * @addr: Starting address to read from, in FPGA address space
+ * @buf: Buffer to read into
+ * @access_type: Method for reading
+ *
+ * Reads from the specified address into the specified buffer.
+ * The address may point to configuration space or to DDR.
+ * Large reads may be performed internally as several non-atomic operations.
+ * This function may sleep, so should not be called from atomic contexts.
+ *
+ * Return: 0 if successful, or an error value otherwise.
+ */
+int mlx5_fpga_mem_read(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
+ void *buf, enum mlx5_fpga_access_type access_type);
+
+/**
+ * mlx5_fpga_mem_write() - Write to FPGA memory address space
+ * @fdev: The FPGA device
+ * @size: Size of chunk to write, in bytes
+ * @addr: Starting address to write to, in FPGA address space
+ * @buf: Buffer which contains data to write
+ * @access_type: Method for writing
+ *
+ * Writes the specified buffer data to FPGA memory at the specified address.
+ * The address may point to configuration space or to DDR.
+ * Large writes may be performed internally as several non-atomic operations.
+ * This function may sleep, so should not be called from atomic contexts.
+ *
+ * Return: 0 if successful, or an error value otherwise.
+ */
+int mlx5_fpga_mem_write(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
+ void *buf, enum mlx5_fpga_access_type access_type);
+
+/**
+ * mlx5_fpga_get_sbu_caps() - Read the SBU capabilities
+ * @fdev: The FPGA device
+ * @size: Size of the buffer to read into
+ * @buf: Buffer to read the capabilities into
+ *
+ * Reads the FPGA SBU capabilities into the specified buffer.
+ * The format of the capabilities buffer is SBU-dependent.
+ *
+ * Return: 0 if successful
+ * -EINVAL if the buffer is not large enough to contain SBU caps
+ * or any other error value otherwise.
+ */
+int mlx5_fpga_get_sbu_caps(struct mlx5_fpga_device *fdev, int size, void *buf);
+
+#endif /* MLX5_FPGA_SDK_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index e9489e8d08bb..fa33d59ab485 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/cmd.h>
#include <linux/module.h>
#include "mlx5_core.h"
+#include "../../mlxfw/mlxfw.h"
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
int outlen)
@@ -223,3 +224,270 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
return 0;
}
+
+enum mlxsw_reg_mcc_instruction {
+ MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
+ MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
+ MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
+ MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
+ MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
+ MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
+};
+
+static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
+ enum mlxsw_reg_mcc_instruction instr,
+ u16 component_index, u32 update_handle,
+ u32 component_size)
+{
+ u32 out[MLX5_ST_SZ_DW(mcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(mcc_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(mcc_reg, in, instruction, instr);
+ MLX5_SET(mcc_reg, in, component_index, component_index);
+ MLX5_SET(mcc_reg, in, update_handle, update_handle);
+ MLX5_SET(mcc_reg, in, component_size, component_size);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCC, 0, 1);
+}
+
+static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
+ u32 *update_handle, u8 *error_code,
+ u8 *control_state)
+{
+ u32 out[MLX5_ST_SZ_DW(mcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(mcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+ MLX5_SET(mcc_reg, in, update_handle, *update_handle);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCC, 0, 0);
+ if (err)
+ goto out;
+
+ *update_handle = MLX5_GET(mcc_reg, out, update_handle);
+ *error_code = MLX5_GET(mcc_reg, out, error_code);
+ *control_state = MLX5_GET(mcc_reg, out, control_state);
+
+out:
+ return err;
+}
+
+static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
+ u32 update_handle,
+ u32 offset, u16 size,
+ u8 *data)
+{
+ int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
+ u32 out[MLX5_ST_SZ_DW(mcda_reg)];
+ int i, j, dw_size = size >> 2;
+ __be32 data_element;
+ u32 *in;
+
+ in = kzalloc(in_size, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(mcda_reg, in, update_handle, update_handle);
+ MLX5_SET(mcda_reg, in, offset, offset);
+ MLX5_SET(mcda_reg, in, size, size);
+
+ for (i = 0; i < dw_size; i++) {
+ j = i * 4;
+ data_element = htonl(*(u32 *)&data[j]);
+ memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
+ }
+
+ err = mlx5_core_access_reg(dev, in, in_size, out,
+ sizeof(out), MLX5_REG_MCDA, 0, 1);
+ kfree(in);
+ return err;
+}
+
+static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
+ u16 component_index,
+ u32 *max_component_size,
+ u8 *log_mcda_word_size,
+ u16 *mcda_max_write_size)
+{
+ u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)];
+ int offset = MLX5_ST_SZ_DW(mcqi_reg);
+ u32 in[MLX5_ST_SZ_DW(mcqi_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(mcqi_reg, in, component_index, component_index);
+ MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap));
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCQI, 0, 0);
+ if (err)
+ goto out;
+
+ *max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size);
+ *log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size);
+ *mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size);
+
+out:
+ return err;
+}
+
+struct mlx5_mlxfw_dev {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlx5_core_dev *mlx5_core_dev;
+};
+
+static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index, u32 *p_max_size,
+ u8 *p_align_bits, u16 *p_max_write_size)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ return mlx5_reg_mcqi_query(dev, component_index, p_max_size,
+ p_align_bits, p_max_write_size);
+}
+
+static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+ u8 control_state, error_code;
+ int err;
+
+ *fwhandle = 0;
+ err = mlx5_reg_mcc_query(dev, fwhandle, &error_code, &control_state);
+ if (err)
+ return err;
+
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+ 0, *fwhandle, 0);
+}
+
+static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index, u32 component_size)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+}
+
+static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u8 *data, u16 size, u32 offset)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ return mlx5_reg_mcda_set(dev, fwhandle, offset, size, data);
+}
+
+static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+}
+
+static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_ACTIVATE, 0,
+ fwhandle, 0);
+}
+
+static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+ u8 control_state, error_code;
+ int err;
+
+ err = mlx5_reg_mcc_query(dev, &fwhandle, &error_code, &control_state);
+ if (err)
+ return err;
+
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+ MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
+}
+
+static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
+ fwhandle, 0);
+}
+
+static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
+ .component_query = mlx5_component_query,
+ .fsm_lock = mlx5_fsm_lock,
+ .fsm_component_update = mlx5_fsm_component_update,
+ .fsm_block_download = mlx5_fsm_block_download,
+ .fsm_component_verify = mlx5_fsm_component_verify,
+ .fsm_activate = mlx5_fsm_activate,
+ .fsm_query_state = mlx5_fsm_query_state,
+ .fsm_cancel = mlx5_fsm_cancel,
+ .fsm_release = mlx5_fsm_release
+};
+
+int mlx5_firmware_flash(struct mlx5_core_dev *dev,
+ const struct firmware *firmware)
+{
+ struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
+ .mlxfw_dev = {
+ .ops = &mlx5_mlxfw_dev_ops,
+ .psid = dev->board_id,
+ .psid_size = strlen(dev->board_id),
+ },
+ .mlx5_core_dev = dev
+ };
+
+ if (!MLX5_CAP_GEN(dev, mcam_reg) ||
+ !MLX5_CAP_MCAM_REG(dev, mcqi) ||
+ !MLX5_CAP_MCAM_REG(dev, mcc) ||
+ !MLX5_CAP_MCAM_REG(dev, mcda)) {
+ pr_info("%s flashing isn't supported by the running FW\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev, firmware);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
new file mode 100644
index 000000000000..eb04e97d8765
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+#include "ipoib.h"
+
+static void mlx5i_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_drvinfo(priv, drvinfo);
+}
+
+static void mlx5i_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_strings(priv, stringset, data);
+}
+
+static int mlx5i_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_sset_count(priv, sset);
+}
+
+static void mlx5i_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
+}
+
+static int mlx5i_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_set_ringparam(priv, param);
+}
+
+static void mlx5i_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_ringparam(priv, param);
+}
+
+static int mlx5i_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_set_channels(priv, ch);
+}
+
+static void mlx5i_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_channels(priv, ch);
+}
+
+static int mlx5i_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_set_coalesce(priv, coal);
+}
+
+static int mlx5i_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_get_coalesce(priv, coal);
+}
+
+static int mlx5i_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_get_ts_info(priv, info);
+}
+
+static int mlx5i_flash_device(struct net_device *netdev,
+ struct ethtool_flash *flash)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_flash_device(priv, flash);
+}
+
+const struct ethtool_ops mlx5i_ethtool_ops = {
+ .get_drvinfo = mlx5i_get_drvinfo,
+ .get_strings = mlx5i_get_strings,
+ .get_sset_count = mlx5i_get_sset_count,
+ .get_ethtool_stats = mlx5i_get_ethtool_stats,
+ .get_ringparam = mlx5i_get_ringparam,
+ .set_ringparam = mlx5i_set_ringparam,
+ .flash_device = mlx5i_flash_device,
+ .get_channels = mlx5i_get_channels,
+ .set_channels = mlx5i_set_channels,
+ .get_coalesce = mlx5i_get_coalesce,
+ .set_coalesce = mlx5i_set_coalesce,
+ .get_ts_info = mlx5i_get_ts_info,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 22ca59145e6c..1ee5bce85901 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -36,20 +36,38 @@
#include "ipoib.h"
#define IB_DEFAULT_Q_KEY 0xb1b
+#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_dev_init(struct net_device *dev);
static void mlx5i_dev_cleanup(struct net_device *dev);
+static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
.ndo_stop = mlx5i_close,
.ndo_init = mlx5i_dev_init,
.ndo_uninit = mlx5i_dev_cleanup,
+ .ndo_change_mtu = mlx5i_change_mtu,
+ .ndo_do_ioctl = mlx5i_ioctl,
};
/* IPoIB mlx5 netdev profile */
+static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
+ mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+
+ /* The default IPoIB RQ size is 512 (1 << 9); kdump kernels fall
+ * back to the minimum to conserve memory.
+ */
+ params->log_rq_size = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+ MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
+
+ params->lro_en = false;
+}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
static void mlx5i_init(struct mlx5_core_dev *mdev,
@@ -59,19 +77,18 @@ static void mlx5i_init(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ /* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
+ mutex_init(&priv->state_lock);
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
+ mlx5i_build_nic_params(mdev, &priv->channels.params);
- /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
- mlx5e_set_rq_type_params(mdev, &priv->channels.params, MLX5_WQ_TYPE_LINKED_LIST);
- priv->channels.params.lro_en = false;
-
- mutex_init(&priv->state_lock);
-
+ /* netdev init */
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;
@@ -82,6 +99,7 @@ static void mlx5i_init(struct mlx5_core_dev *mdev,
netdev->hw_features |= NETIF_F_RXHASH;
netdev->netdev_ops = &mlx5i_netdev_ops;
+ netdev->ethtool_ops = &mlx5i_ethtool_ops;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
@@ -290,6 +308,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.disable = NULL, /* mlx5i_disable */
.update_stats = NULL, /* mlx5i_update_stats */
.max_nch = mlx5e_get_max_num_channels,
+ .update_carrier = NULL, /* no HW update in IB link */
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
@@ -297,6 +316,35 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
/* mlx5i netdev NDOs */
+static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_channels new_channels = {};
+ int curr_mtu;
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ curr_mtu = netdev->mtu;
+ netdev->mtu = new_mtu;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
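+ /* The device is up: re-create the channels so the new MTU takes
+ * effect, and roll the MTU back if channel creation fails.
+ */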
+ new_channels.params = priv->channels.params;
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err) {
+ netdev->mtu = curr_mtu;
+ goto out;
+ }
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+
+out:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
static int mlx5i_dev_init(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -310,6 +358,20 @@ static int mlx5i_dev_init(struct net_device *dev)
return 0;
}
+static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return mlx5e_hwstamp_set(priv, ifr);
+ case SIOCGHWTSTAMP:
+ return mlx5e_hwstamp_get(priv, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static void mlx5i_dev_cleanup(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -336,6 +398,8 @@ static int mlx5i_open(struct net_device *netdev)
mlx5e_refresh_tirs(priv, false);
mlx5e_activate_priv_channels(priv);
+ mlx5e_timestamp_init(priv);
+
mutex_unlock(&priv->state_lock);
return 0;
@@ -359,6 +423,7 @@ static int mlx5i_close(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
@@ -510,4 +575,3 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
mlx5e_destroy_mdev_resources(priv->mdev);
}
EXPORT_SYMBOL(mlx5_rdma_netdev_free);
-
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 213191a78464..a0f405f520f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -38,6 +38,13 @@
#define MLX5I_MAX_NUM_TC 1
+extern const struct ethtool_ops mlx5i_ethtool_ops;
+
+#define MLX5_IB_GRH_BYTES 40
+#define MLX5_IPOIB_ENCAP_LEN 4
+#define MLX5_IPOIB_PSEUDO_LEN 20
+#define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
+
/* ipoib rdma netdev's private data structure */
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
new file mode 100644
index 000000000000..4d0db481f6c4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/etherdevice.h>
+#include <linux/idr.h>
+#include "mlx5_core.h"
+
+void mlx5_init_reserved_gids(struct mlx5_core_dev *dev)
+{
+ unsigned int tblsz = MLX5_CAP_ROCE(dev, roce_address_table_size);
+
+ ida_init(&dev->roce.reserved_gids.ida);
+ dev->roce.reserved_gids.start = tblsz;
+ dev->roce.reserved_gids.count = 0;
+}
+
+void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev)
+{
+ WARN_ON(!ida_is_empty(&dev->roce.reserved_gids.ida));
+ dev->roce.reserved_gids.start = 0;
+ dev->roce.reserved_gids.count = 0;
+ ida_destroy(&dev->roce.reserved_gids.ida);
+}
+
+int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
+{
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+ mlx5_core_err(dev, "Cannot reserve GIDs when interfaces are up\n");
+ return -EPERM;
+ }
+ if (dev->roce.reserved_gids.start < count) {
+ mlx5_core_warn(dev, "GID table exhausted attempting to reserve %d more GIDs\n",
+ count);
+ return -ENOMEM;
+ }
+ if (dev->roce.reserved_gids.count + count > MLX5_MAX_RESERVED_GIDS) {
+ mlx5_core_warn(dev, "Unable to reserve %d more GIDs\n", count);
+ return -ENOMEM;
+ }
+
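+ /* Reserved GIDs occupy the top of the table; grow the reserved
+ * window downward by moving its start.
+ */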
+ dev->roce.reserved_gids.start -= count;
+ dev->roce.reserved_gids.count += count;
+ mlx5_core_dbg(dev, "Reserved %u GIDs starting at %u\n",
+ dev->roce.reserved_gids.count,
+ dev->roce.reserved_gids.start);
+ return 0;
+}
+
+void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
+{
+ WARN(test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state), "Unreserving GIDs when interfaces are up");
+ WARN(count > dev->roce.reserved_gids.count, "Unreserving %u GIDs when only %u reserved",
+ count, dev->roce.reserved_gids.count);
+
+ dev->roce.reserved_gids.start += count;
+ dev->roce.reserved_gids.count -= count;
+ mlx5_core_dbg(dev, "%u GIDs starting at %u left reserved\n",
+ dev->roce.reserved_gids.count,
+ dev->roce.reserved_gids.start);
+}
+
+int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
+{
+ int end = dev->roce.reserved_gids.start +
+ dev->roce.reserved_gids.count;
+ int index = 0;
+
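+ /* Pick a free index from the reserved window [start, start + count). */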
+ index = ida_simple_get(&dev->roce.reserved_gids.ida,
+ dev->roce.reserved_gids.start, end,
+ GFP_KERNEL);
+ if (index < 0)
+ return index;
+
+ mlx5_core_dbg(dev, "Allodating reserved GID %u\n", index);
+ *gid_index = index;
+ return 0;
+}
+
+void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index)
+{
+ mlx5_core_dbg(dev, "Freeing reserved GID %u\n", gid_index);
+ ida_simple_remove(&dev->roce.reserved_gids.ida, gid_index);
+}
+
+unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev)
+{
+ return dev->roce.reserved_gids.count;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_reserved_gids_count);
+
+int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
+ u8 roce_version, u8 roce_l3_type, const u8 *gid,
+ const u8 *mac, bool vlan, u16 vlan_id)
+{
+#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
+ u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
+ void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
+ char *addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, in_addr,
+ source_l3_address);
+ void *addr_mac = MLX5_ADDR_OF(roce_addr_layout, in_addr,
+ source_mac_47_32);
+ int gidsz = MLX5_FLD_SZ_BYTES(roce_addr_layout, source_l3_address);
+
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return -EINVAL;
+
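+ /* A NULL gid clears the entry (the command payload stays zeroed);
+ * otherwise fill in the VLAN, MAC and L3 address fields.
+ */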
+ if (gid) {
+ if (vlan) {
+ MLX5_SET_RA(in_addr, vlan_valid, 1);
+ MLX5_SET_RA(in_addr, vlan_id, vlan_id);
+ }
+
+ ether_addr_copy(addr_mac, mac);
+ MLX5_SET_RA(in_addr, roce_version, roce_version);
+ MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type);
+ memcpy(addr_l3_addr, gid, gidsz);
+ }
+
+ MLX5_SET(set_roce_address_in, in, roce_address_index, index);
+ MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_roce_gid_set);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
new file mode 100644
index 000000000000..7550b1cc8c6a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIB_MLX5_H__
+#define __LIB_MLX5_H__
+
+void mlx5_init_reserved_gids(struct mlx5_core_dev *dev);
+void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev);
+int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count);
+void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count);
+int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index);
+void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 715eeab59999..719f8e974482 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -56,7 +56,9 @@
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
+#include "lib/mlx5.h"
#include "fpga/core.h"
+#include "accel/ipsec.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@@ -176,8 +178,9 @@ static struct mlx5_profile profile[] = {
},
};
-#define FW_INIT_TIMEOUT_MILI 2000
-#define FW_INIT_WAIT_MS 2
+#define FW_INIT_TIMEOUT_MILI 2000
+#define FW_INIT_WAIT_MS 2
+#define FW_PRE_INIT_TIMEOUT_MILI 10000
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
@@ -935,6 +938,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_mkey_table(dev);
+ mlx5_init_reserved_gids(dev);
+
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -955,8 +960,16 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_eswitch_cleanup;
}
+ err = mlx5_fpga_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init fpga device %d\n", err);
+ goto err_sriov_cleanup;
+ }
+
return 0;
+err_sriov_cleanup:
+ mlx5_sriov_cleanup(dev);
err_eswitch_cleanup:
#ifdef CONFIG_MLX5_CORE_EN
mlx5_eswitch_cleanup(dev->priv.eswitch);
@@ -980,11 +993,13 @@ out:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_fpga_cleanup(dev);
mlx5_sriov_cleanup(dev);
#ifdef CONFIG_MLX5_CORE_EN
mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
mlx5_cleanup_rl_table(dev);
+ mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
@@ -1013,6 +1028,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
*/
dev->state = MLX5_DEVICE_STATE_UP;
+ /* Wait for the firmware to accept the initialization segment
+ * configuration before setting up the command interface.
+ */
+ err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+ if (err) {
+ dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+ FW_PRE_INIT_TIMEOUT_MILI);
+ goto out;
+ }
+
err = mlx5_cmd_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
@@ -1107,16 +1131,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_disable_msix;
}
- err = mlx5_fpga_device_init(dev);
- if (err) {
- dev_err(&pdev->dev, "fpga device init failed %d\n", err);
- goto err_put_uars;
- }
-
err = mlx5_start_eqs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
- goto err_fpga_init;
+ goto err_put_uars;
}
err = alloc_comp_eqs(dev);
@@ -1150,7 +1168,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
err = mlx5_fpga_device_start(dev);
if (err) {
dev_err(&pdev->dev, "fpga device start failed %d\n", err);
- goto err_reg_dev;
+ goto err_fpga_start;
+ }
+ err = mlx5_accel_ipsec_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
+ goto err_ipsec_start;
}
if (mlx5_device_registered(dev)) {
@@ -1171,6 +1194,11 @@ out:
return 0;
err_reg_dev:
+ mlx5_accel_ipsec_cleanup(dev);
+err_ipsec_start:
+ mlx5_fpga_device_stop(dev);
+
+err_fpga_start:
mlx5_sriov_detach(dev);
err_sriov:
@@ -1188,9 +1216,6 @@ err_affinity_hints:
err_stop_eqs:
mlx5_stop_eqs(dev);
-err_fpga_init:
- mlx5_fpga_device_cleanup(dev);
-
err_put_uars:
mlx5_put_uars_page(dev, priv->uar);
@@ -1244,9 +1269,15 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto out;
}
+ clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+ set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
+ mlx5_accel_ipsec_cleanup(dev);
+ mlx5_fpga_device_stop(dev);
+
mlx5_sriov_detach(dev);
#ifdef CONFIG_MLX5_CORE_EN
mlx5_eswitch_detach(dev->priv.eswitch);
@@ -1255,7 +1286,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
- mlx5_fpga_device_cleanup(dev);
mlx5_put_uars_page(dev, priv->uar);
mlx5_disable_msix(dev);
if (cleanup)
@@ -1272,8 +1302,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_cmd_cleanup(dev);
out:
- clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
- set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
mutex_unlock(&dev->intf_state_mutex);
return err;
}
@@ -1546,11 +1574,11 @@ static void shutdown(struct pci_dev *pdev)
}
static const struct pci_device_id mlx5_core_pci_table[] = {
- { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
+ { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
- { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */
+ { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
- { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
+ { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
{ PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 5ccdf43e58a6..6a3d6bef7dd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -37,6 +37,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/if_link.h>
+#include <linux/firmware.h>
#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"
@@ -153,6 +154,8 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
+
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 06019d00ab7b..5abfec1c3399 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -926,12 +926,16 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
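+ /* RoCE enablement is refcounted: only the first user changes HW state. */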
+ if (atomic_inc_return(&mdev->roce.roce_en) != 1)
+ return 0;
return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
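+ /* Only the last user actually disables RoCE on the vport. */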
+ if (atomic_dec_return(&mdev->roce.roce_en) != 0)
+ return 0;
return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 921673c42bc9..6bcfc25350f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -54,6 +54,12 @@ static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
}
+static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
+{
+ return mlx5_wq_cyc_get_byte_size(&wq->rq) +
+ mlx5_wq_cyc_get_byte_size(&wq->sq);
+}
+
static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
{
return mlx5_cqwq_get_size(wq) << wq->log_stride;
@@ -99,6 +105,46 @@ err_db_free:
return err;
}
+int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *qpc, struct mlx5_wq_qp *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->rq.log_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
+ wq->rq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_rq_size)) - 1;
+
+ wq->sq.log_stride = ilog2(MLX5_SEND_WQE_BB);
+ wq->sq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_sq_size)) - 1;
+
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+ goto err_db_free;
+ }
+
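+ /* The RQ and SQ share one contiguous buffer; the SQ starts
+ * immediately after the RQ area.
+ */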
+ wq->rq.buf = wq_ctrl->buf.direct.buf;
+ wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
+ wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
+ wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_frag_wq_ctrl *wq_ctrl)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index d8afed898c31..718589d0cec2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -34,6 +34,8 @@
#define __MLX5_WQ_H__
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/qp.h>
struct mlx5_wq_param {
int linear;
@@ -60,6 +62,11 @@ struct mlx5_wq_cyc {
u8 log_stride;
};
+struct mlx5_wq_qp {
+ struct mlx5_wq_cyc rq;
+ struct mlx5_wq_cyc sq;
+};
+
struct mlx5_cqwq {
struct mlx5_frag_buf frag_buf;
__be32 *db;
@@ -87,6 +94,10 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *qpc, struct mlx5_wq_qp *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_frag_wq_ctrl *wq_ctrl);
@@ -146,6 +157,22 @@ static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
*wq->db = cpu_to_be32(wq->cc & 0xffffff);
}
+static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
+{
+ u32 ci = mlx5_cqwq_get_ci(wq);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+ u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+ u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
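+ /* HW flips the CQE ownership bit on each pass over the ring, so a
+ * CQE is valid when its bit matches the SW wrap-count parity.
+ */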
+ if (cqe_ownership_bit != sw_ownership_val)
+ return NULL;
+
+ /* ensure cqe content is read after cqe ownership bit */
+ dma_rmb();
+
+ return cqe;
+}
+
static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
return wq->cur_sz == wq->sz_m1;
diff --git a/drivers/net/ethernet/mellanox/mlxfw/Kconfig b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
index 2b21af8a2b1d..186ebe783f97 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
@@ -3,5 +3,11 @@
#
config MLXFW
- tristate "mlxfw" if COMPILE_TEST
+ tristate "Mellanox Technologies firmware flash module"
+ ---help---
+ This driver supports the common logic for flashing firmware on
+ Mellanox Technologies devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxfw.
select XZ_DEC
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index beea4ba83495..9ca85383aa35 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -96,7 +96,16 @@ struct mlxfw_dev {
u16 psid_size;
};
+#if IS_ENABLED(CONFIG_MLXFW)
int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
const struct firmware *firmware);
+#else
+static inline
+int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware)
+{
+ return -EOPNOTSUPP;
+}
+#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 700cc8c6aa5b..192cb93e7669 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3301,6 +3301,9 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
+ if (netif_is_bridge_port(vlan_dev))
+ return 0;
+
if (mlxsw_sp_port_dev_check(real_dev))
return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
event, vid);
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 0d5a7b9203a4..0e331e2f685a 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -25,6 +25,16 @@ config NFP
cards working as an advanced Ethernet NIC. It works with both
SR-IOV physical and virtual functions.
+config NFP_APP_FLOWER
+ bool "NFP4000/NFP6000 TC Flower offload support"
+ depends on NFP
+ depends on NET_SWITCHDEV
+ ---help---
+ Enable driver support for TC Flower offload on NFP4000 and NFP6000.
+ Say Y if you plan to make use of TC Flower offload, either
+ directly, with Open vSwitch, or in any other way. Note that
+ TC Flower offload requires specific firmware to work.
+
config NFP_DEBUG
bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers"
depends on NFP
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 5ad9a557f06a..b8e1358868bd 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -22,12 +22,23 @@ nfp-objs := \
nfp_net_common.o \
nfp_net_ethtool.o \
nfp_net_main.o \
+ nfp_net_repr.o \
nfp_netvf_main.o \
nfp_port.o \
bpf/main.o \
bpf/offload.o \
nic/main.o
+ifeq ($(CONFIG_NFP_APP_FLOWER),y)
+nfp-objs += \
+ flower/action.o \
+ flower/cmsg.o \
+ flower/main.o \
+ flower/match.o \
+ flower/metadata.o \
+ flower/offload.o
+endif
+
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
bpf/verifier.o \
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
new file mode 100644
index 000000000000..db9750695dc7
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <net/pkt_cls.h>
+#include <net/switchdev.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_net_repr.h"
+
+static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
+{
+ size_t act_size = sizeof(struct nfp_fl_pop_vlan);
+ u16 tmp_pop_vlan_op;
+
+ tmp_pop_vlan_op =
+ FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_POP_VLAN);
+
+ pop_vlan->a_op = cpu_to_be16(tmp_pop_vlan_op);
+ pop_vlan->reserved = 0;
+}
+
+static void
+nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
+ const struct tc_action *action)
+{
+ size_t act_size = sizeof(struct nfp_fl_push_vlan);
+ struct tcf_vlan *vlan = to_vlan(action);
+ u16 tmp_push_vlan_tci;
+ u16 tmp_push_vlan_op;
+
+ tmp_push_vlan_op =
+ FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PUSH_VLAN);
+
+ push_vlan->a_op = cpu_to_be16(tmp_push_vlan_op);
+ /* Set action push vlan parameters. */
+ push_vlan->reserved = 0;
+ push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
+
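+ /* Build the TCI from the action's priority and VID; the CFI bit
+ * is always set.
+ */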
+ tmp_push_vlan_tci =
+ FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, vlan->tcfv_push_prio) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, vlan->tcfv_push_vid) |
+ NFP_FL_PUSH_VLAN_CFI;
+ push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
+}
+
+static int
+nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
+ struct nfp_fl_payload *nfp_flow, bool last,
+ struct net_device *in_dev)
+{
+ size_t act_size = sizeof(struct nfp_fl_output);
+ struct net_device *out_dev;
+ u16 tmp_output_op;
+ int ifindex;
+
+ /* Set action opcode to output action. */
+ tmp_output_op =
+ FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_OUTPUT);
+
+ output->a_op = cpu_to_be16(tmp_output_op);
+
+ /* Set action output parameters. */
+ output->flags = cpu_to_be16(last ? NFP_FL_OUT_FLAGS_LAST : 0);
+
+ ifindex = tcf_mirred_ifindex(action);
+ out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
+ if (!out_dev)
+ return -EOPNOTSUPP;
+
+ /* Only offload if the egress port is on the same device as the
+ * ingress port.
+ */
+ if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ return -EOPNOTSUPP;
+
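+ /* Translate the egress netdev into the firmware's port ID; a zero
+ * ID means the netdev is not an NFP representor port.
+ */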
+ output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
+ if (!output->port)
+ return -EOPNOTSUPP;
+
+ nfp_flow->meta.shortcut = output->port;
+
+ return 0;
+}
+
+static int
+nfp_flower_loop_action(const struct tc_action *a,
+ struct nfp_fl_payload *nfp_fl, int *a_len,
+ struct net_device *netdev)
+{
+ struct nfp_fl_push_vlan *psh_v;
+ struct nfp_fl_pop_vlan *pop_v;
+ struct nfp_fl_output *output;
+ int err;
+
+ if (is_tcf_gact_shot(a)) {
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
+ } else if (is_tcf_mirred_egress_redirect(a)) {
+ if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_output(output, a, nfp_fl, true, netdev);
+ if (err)
+ return err;
+
+ *a_len += sizeof(struct nfp_fl_output);
+ } else if (is_tcf_mirred_egress_mirror(a)) {
+ if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_output(output, a, nfp_fl, false, netdev);
+ if (err)
+ return err;
+
+ *a_len += sizeof(struct nfp_fl_output);
+ } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
+ if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);
+
+ nfp_fl_pop_vlan(pop_v);
+ *a_len += sizeof(struct nfp_fl_pop_vlan);
+ } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
+ if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_fl_push_vlan(psh_v, a);
+ *a_len += sizeof(struct nfp_fl_push_vlan);
+ } else {
+ /* Currently we do not handle any other actions. */
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
+ struct net_device *netdev,
+ struct nfp_fl_payload *nfp_flow)
+{
+ int act_len, act_cnt, err;
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
+ nfp_flow->meta.act_len = 0;
+ act_len = 0;
+ act_cnt = 0;
+
+ tcf_exts_to_list(flow->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev);
+ if (err)
+ return err;
+ act_cnt++;
+ }
+
+ /* We optimise when the action list is small; unfortunately this
+ * shortcut cannot be taken once there is more than one action in
+ * the list.
+ */
+ if (act_cnt > 1)
+ nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_flow->meta.act_len = act_len;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
new file mode 100644
index 000000000000..dd7fa9cf225f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/dst_metadata.h>
+
+#include "main.h"
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_net_repr.h"
+#include "./cmsg.h"
+
+#define nfp_flower_cmsg_warn(app, fmt, args...) \
+ do { \
+ if (net_ratelimit()) \
+ nfp_warn((app)->cpp, fmt, ## args); \
+ } while (0)
+
+static struct nfp_flower_cmsg_hdr *
+nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
+{
+ return (struct nfp_flower_cmsg_hdr *)skb->data;
+}
+
+struct sk_buff *
+nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
+ enum nfp_flower_cmsg_type_port type)
+{
+ struct nfp_flower_cmsg_hdr *ch;
+ struct sk_buff *skb;
+
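+ /* Account for the control message header placed ahead of the payload. */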
+ size += NFP_FLOWER_CMSG_HLEN;
+
+ skb = nfp_app_ctrl_msg_alloc(app, size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ ch = nfp_flower_cmsg_get_hdr(skb);
+ ch->pad = 0;
+ ch->version = NFP_FLOWER_CMSG_VER1;
+ ch->type = type;
+ skb_put(skb, size);
+
+ return skb;
+}
+
+int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
+{
+ struct nfp_flower_cmsg_portmod *msg;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
+ NFP_FLOWER_CMSG_TYPE_PORT_MOD);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
+ msg->reserved = 0;
+ msg->info = carrier_ok;
+ msg->mtu = cpu_to_be16(repr->netdev->mtu);
+
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ return 0;
+}
+
+static void
+nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_cmsg_portmod *msg;
+ struct net_device *netdev;
+ bool link;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK;
+
+ rcu_read_lock();
+ netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ if (!netdev) {
+ nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
+ be32_to_cpu(msg->portnum));
+ rcu_read_unlock();
+ return;
+ }
+
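+ /* Mirror the firmware-reported link state and MTU onto the
+ * representor netdev.
+ */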
+ if (link) {
+ netif_carrier_on(netdev);
+ rtnl_lock();
+ dev_set_mtu(netdev, be16_to_cpu(msg->mtu));
+ rtnl_unlock();
+ } else {
+ netif_carrier_off(netdev);
+ }
+ rcu_read_unlock();
+}
+
+void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_cmsg_hdr *cmsg_hdr;
+ enum nfp_flower_cmsg_type_port type;
+
+ cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
+
+ if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
+ nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
+ cmsg_hdr->version);
+ goto out;
+ }
+
+ type = cmsg_hdr->type;
+ switch (type) {
+ case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
+ nfp_flower_cmsg_portmod_rx(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
+ nfp_flower_rx_flow_stats(app, skb);
+ break;
+ default:
+ nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
+ type);
+ }
+
+out:
+ dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
new file mode 100644
index 000000000000..cf738de170ab
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_FLOWER_CMSG_H
+#define NFP_FLOWER_CMSG_H
+
+#include <linux/bitfield.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "../nfp_app.h"
+
+#define NFP_FLOWER_LAYER_META BIT(0)
+#define NFP_FLOWER_LAYER_PORT BIT(1)
+#define NFP_FLOWER_LAYER_MAC BIT(2)
+#define NFP_FLOWER_LAYER_TP BIT(3)
+#define NFP_FLOWER_LAYER_IPV4 BIT(4)
+#define NFP_FLOWER_LAYER_IPV6 BIT(5)
+#define NFP_FLOWER_LAYER_CT BIT(6)
+#define NFP_FLOWER_LAYER_VXLAN BIT(7)
+
+#define NFP_FLOWER_LAYER_ETHER BIT(3)
+#define NFP_FLOWER_LAYER_ARP BIT(4)
+
+#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
+#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
+#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
+
+#define NFP_FL_SC_ACT_DROP 0x80000000
+#define NFP_FL_SC_ACT_USER 0x7D000000
+#define NFP_FL_SC_ACT_POPV 0x6A000000
+#define NFP_FL_SC_ACT_NULL 0x00000000
+
+/* The maximum action list size (in bytes) supported by the NFP. */
+#define NFP_FL_MAX_A_SIZ 1216
+#define NFP_FL_LW_SIZ 2
+
+/* Action opcodes */
+#define NFP_FL_ACTION_OPCODE_OUTPUT 0
+#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
+#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_NUM 32
+
+#define NFP_FL_ACT_JMP_ID GENMASK(15, 8)
+#define NFP_FL_ACT_LEN_LW GENMASK(7, 0)
+
+#define NFP_FL_OUT_FLAGS_LAST BIT(15)
+#define NFP_FL_OUT_FLAGS_USE_TUN BIT(4)
+#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
+
+#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
+#define NFP_FL_PUSH_VLAN_CFI BIT(12)
+#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+
+struct nfp_fl_output {
+ __be16 a_op;
+ __be16 flags;
+ __be32 port;
+};
+
+struct nfp_fl_push_vlan {
+ __be16 a_op;
+ __be16 reserved;
+ __be16 vlan_tpid;
+ __be16 vlan_tci;
+};
+
+struct nfp_fl_pop_vlan {
+ __be16 a_op;
+ __be16 reserved;
+};
+
+/* Metadata without L2 (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | key_layers | mask_id | reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_meta_one {
+ u8 nfp_flow_key_layer;
+ u8 mask_id;
+ u16 reserved;
+};
+
+/* Metadata with L2 (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | key_type | mask_id | PCP |p| vlan outermost VID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * ^ ^
+ * NOTE: | TCI |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_meta_two {
+ u8 nfp_flow_key_layer;
+ u8 mask_id;
+ __be16 tci;
+};
+
+/* Port details (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | port_ingress |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_in_port {
+ __be32 in_port;
+};
+
+/* L2 details (4W/16B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | mac_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | mac_addr_dst, 47 - 32 | mac_addr_src, 15 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | mac_addr_src, 47 - 16 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | mpls outermost label | TC |B| reserved |q|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_mac_mpls {
+ u8 mac_dst[6];
+ u8 mac_src[6];
+ __be32 mpls_lse;
+};
+
+/* L4 ports (for UDP, TCP, SCTP) (1W/4B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | port_src | port_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_tp_ports {
+ __be16 port_src;
+ __be16 port_dst;
+};
+
+/* L3 IPv4 details (3W/12B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | DSCP |ECN| protocol | reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_src |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv4 {
+ u8 tos;
+ u8 proto;
+ u8 ttl;
+ u8 reserved;
+ __be32 ipv4_src;
+ __be32 ipv4_dst;
+};
+
+/* L3 IPv6 details (10W/40B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | DSCP |ECN| protocol | reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_exthdr | res | ipv6_flow_label |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6 {
+ u8 tos;
+ u8 proto;
+ u8 ttl;
+ u8 reserved;
+ __be32 ipv6_flow_label_exthdr;
+ struct in6_addr ipv6_src;
+ struct in6_addr ipv6_dst;
+};
+
+/* The base header for a control message packet.
+ * Defines an 8-bit version and an 8-bit type, padded
+ * to a 32-bit word. Rest of the packet is type-specific.
+ */
+struct nfp_flower_cmsg_hdr {
+ __be16 pad;
+ u8 type;
+ u8 version;
+};
+
+#define NFP_FLOWER_CMSG_HLEN sizeof(struct nfp_flower_cmsg_hdr)
+#define NFP_FLOWER_CMSG_VER1 1
+
+/* Types defined for port related control messages */
+enum nfp_flower_cmsg_type_port {
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
+ NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
+ NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
+ NFP_FLOWER_CMSG_TYPE_MAX = 32,
+};
+
+/* NFP_FLOWER_CMSG_TYPE_PORT_MOD */
+struct nfp_flower_cmsg_portmod {
+ __be32 portnum;
+ u8 reserved;
+ u8 info;
+ __be16 mtu;
+};
+
+#define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK BIT(0)
+
+enum nfp_flower_cmsg_port_type {
+ NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
+ NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT = 0x2,
+};
+
+enum nfp_flower_cmsg_port_vnic_type {
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF = 0x0,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF = 0x1,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_CTRL = 0x2,
+};
+
+#define NFP_FLOWER_CMSG_PORT_TYPE GENMASK(31, 28)
+#define NFP_FLOWER_CMSG_PORT_SYS_ID GENMASK(27, 24)
+#define NFP_FLOWER_CMSG_PORT_NFP_ID GENMASK(23, 22)
+#define NFP_FLOWER_CMSG_PORT_PCI GENMASK(15, 14)
+#define NFP_FLOWER_CMSG_PORT_VNIC_TYPE GENMASK(13, 12)
+#define NFP_FLOWER_CMSG_PORT_VNIC GENMASK(11, 6)
+#define NFP_FLOWER_CMSG_PORT_PCIE_Q GENMASK(5, 0)
+#define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM GENMASK(7, 0)
+
+static inline u32 nfp_flower_cmsg_phys_port(u8 phys_port)
+{
+ return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, phys_port) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT);
+}
+
+static inline u32
+nfp_flower_cmsg_pcie_port(u8 nfp_pcie, enum nfp_flower_cmsg_port_vnic_type type,
+ u8 vnic, u8 q)
+{
+ return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PCI, nfp_pcie) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, type) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_VNIC, vnic) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_PCIE_Q, q) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
+ NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT);
+}
+
+static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
+{
+ return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN;
+}
+
+int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
+void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
+struct sk_buff *
+nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
+ enum nfp_flower_cmsg_type_port type);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
new file mode 100644
index 000000000000..5fe6d3582597
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <net/devlink.h>
+#include <net/dst_metadata.h>
+
+#include "main.h"
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
+#include "../nfpcore/nfp_nsp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_net_repr.h"
+#include "../nfp_port.h"
+#include "./cmsg.h"
+
+#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
+
+static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
+{
+ return "FLOWER";
+}
+
+static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
+{
+ return DEVLINK_ESWITCH_MODE_SWITCHDEV;
+}
+
+static enum nfp_repr_type
+nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
+{
+ switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
+ case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
+ *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
+ port_id);
+ return NFP_REPR_TYPE_PHYS_PORT;
+
+ case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
+ *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
+ return NFP_REPR_TYPE_PF;
+ else
+ return NFP_REPR_TYPE_VF;
+ }
+
+ return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
+}
+
+static struct net_device *
+nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
+{
+ enum nfp_repr_type repr_type;
+ struct nfp_reprs *reprs;
+ u8 port = 0;
+
+ repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
+
+ reprs = rcu_dereference(app->reprs[repr_type]);
+ if (!reprs)
+ return NULL;
+
+ if (port >= reprs->num_reprs)
+ return NULL;
+
+ return reprs->reprs[port];
+}
+
+static int
+nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
+{
+ int err;
+
+ err = nfp_flower_cmsg_portmod(repr, true);
+ if (err)
+ return err;
+
+ netif_carrier_on(repr->netdev);
+ netif_tx_wake_all_queues(repr->netdev);
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
+{
+ netif_carrier_off(repr->netdev);
+ netif_tx_disable(repr->netdev);
+
+ return nfp_flower_cmsg_portmod(repr, false);
+}
+
+static void nfp_flower_sriov_disable(struct nfp_app *app)
+{
+ nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
+}
+
+static int
+nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
+ enum nfp_flower_cmsg_port_vnic_type vnic_type,
+ enum nfp_repr_type repr_type, unsigned int cnt)
+{
+ u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_reprs *reprs, *old_reprs;
+ enum nfp_port_type port_type;
+ const u8 queue = 0;
+ int i, err;
+
+ port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
+ NFP_PORT_VF_PORT;
+
+ reprs = nfp_reprs_alloc(cnt);
+ if (!reprs)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ struct nfp_port *port;
+ u32 port_id;
+
+ reprs->reprs[i] = nfp_repr_alloc(app);
+ if (!reprs->reprs[i]) {
+ err = -ENOMEM;
+ goto err_reprs_clean;
+ }
+
+ port = nfp_port_alloc(app, port_type, reprs->reprs[i]);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
+ goto err_reprs_clean;
+ }
+ if (repr_type == NFP_REPR_TYPE_PF) {
+ port->pf_id = i;
+ } else {
+ port->pf_id = 0; /* For now we only support 1 PF */
+ port->vf_id = i;
+ }
+
+ eth_hw_addr_random(reprs->reprs[i]);
+
+ port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
+ i, queue);
+ err = nfp_repr_init(app, reprs->reprs[i],
+ port_id, port, priv->nn->dp.netdev);
+ if (err) {
+ nfp_port_free(port);
+ goto err_reprs_clean;
+ }
+
+ nfp_info(app->cpp, "%s%d Representor(%s) created\n",
+ repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
+ reprs->reprs[i]->name);
+ }
+
+ old_reprs = nfp_app_reprs_set(app, repr_type, reprs);
+ if (IS_ERR(old_reprs)) {
+ err = PTR_ERR(old_reprs);
+ goto err_reprs_clean;
+ }
+
+ return 0;
+err_reprs_clean:
+ nfp_reprs_clean_and_free(reprs);
+ return err;
+}
+
+static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
+{
+ return nfp_flower_spawn_vnic_reprs(app,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
+ NFP_REPR_TYPE_VF, num_vfs);
+}
+
+static void nfp_flower_stop(struct nfp_app *app)
+{
+ nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
+ nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+}
+
+static int
+nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
+{
+ struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
+ struct nfp_reprs *reprs, *old_reprs;
+ unsigned int i;
+ int err;
+
+ reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
+ if (!reprs)
+ return -ENOMEM;
+
+ for (i = 0; i < eth_tbl->count; i++) {
+ int phys_port = eth_tbl->ports[i].index;
+ struct nfp_port *port;
+ u32 cmsg_port_id;
+
+ reprs->reprs[phys_port] = nfp_repr_alloc(app);
+ if (!reprs->reprs[phys_port]) {
+ err = -ENOMEM;
+ goto err_reprs_clean;
+ }
+
+ port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT,
+ reprs->reprs[phys_port]);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
+ goto err_reprs_clean;
+ }
+ err = nfp_port_init_phy_port(app->pf, app, port, i);
+ if (err) {
+ nfp_port_free(port);
+ goto err_reprs_clean;
+ }
+
+ SET_NETDEV_DEV(reprs->reprs[phys_port], &priv->nn->pdev->dev);
+ nfp_net_get_mac_addr(app->pf, port,
+ eth_tbl->ports[i].eth_index);
+
+ cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
+ err = nfp_repr_init(app, reprs->reprs[phys_port],
+ cmsg_port_id, port, priv->nn->dp.netdev);
+ if (err) {
+ nfp_port_free(port);
+ goto err_reprs_clean;
+ }
+
+ nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
+ phys_port, reprs->reprs[phys_port]->name);
+ }
+
+ old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
+ if (IS_ERR(old_reprs)) {
+ err = PTR_ERR(old_reprs);
+ goto err_reprs_clean;
+ }
+
+ return 0;
+err_reprs_clean:
+ nfp_reprs_clean_and_free(reprs);
+ return err;
+}
+
+static int nfp_flower_start(struct nfp_app *app)
+{
+ int err;
+
+ err = nfp_flower_spawn_phy_reprs(app, app->priv);
+ if (err)
+ return err;
+
+ return nfp_flower_spawn_vnic_reprs(app,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
+ NFP_REPR_TYPE_PF, 1);
+}
+
+static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ if (id > 0) {
+ nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
+ goto err_invalid_port;
+ }
+
+ priv->nn = nn;
+
+ eth_hw_addr_random(nn->dp.netdev);
+ netif_keep_dst(nn->dp.netdev);
+
+ return 0;
+
+err_invalid_port:
+ nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
+ return PTR_ERR_OR_ZERO(nn->port);
+}
+
+static int nfp_flower_init(struct nfp_app *app)
+{
+ const struct nfp_pf *pf = app->pf;
+ u64 version;
+ int err;
+
+ if (!pf->eth_tbl) {
+ nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
+ return -EINVAL;
+ }
+
+ if (!pf->mac_stats_bar) {
+ nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
+ return -EINVAL;
+ }
+
+ if (!pf->vf_cfg_bar) {
+ nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
+ return -EINVAL;
+ }
+
+ version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
+ if (err) {
+ nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
+ return err;
+ }
+
+ /* Ensure the firmware reports a flower version we support. */
+ if (version != NFP_FLOWER_ALLOWED_VER) {
+ nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
+ return -EINVAL;
+ }
+
+ app->priv = vzalloc(sizeof(struct nfp_flower_priv));
+ if (!app->priv)
+ return -ENOMEM;
+
+ err = nfp_flower_metadata_init(app);
+ if (err)
+ goto err_free_app_priv;
+
+ return 0;
+
+err_free_app_priv:
+ vfree(app->priv);
+ return err;
+}
+
+static void nfp_flower_clean(struct nfp_app *app)
+{
+ vfree(app->priv);
+ app->priv = NULL;
+}
+
+const struct nfp_app_type app_flower = {
+ .id = NFP_APP_FLOWER_NIC,
+ .name = "flower",
+ .ctrl_has_meta = true,
+
+ .extra_cap = nfp_flower_extra_cap,
+
+ .init = nfp_flower_init,
+ .clean = nfp_flower_clean,
+
+ .vnic_init = nfp_flower_vnic_init,
+
+ .repr_open = nfp_flower_repr_netdev_open,
+ .repr_stop = nfp_flower_repr_netdev_stop,
+
+ .start = nfp_flower_start,
+ .stop = nfp_flower_stop,
+
+ .ctrl_msg_rx = nfp_flower_cmsg_rx,
+
+ .sriov_enable = nfp_flower_sriov_enable,
+ .sriov_disable = nfp_flower_sriov_disable,
+
+ .eswitch_mode_get = eswitch_mode_get,
+ .repr_get = nfp_flower_repr_get,
+
+ .setup_tc = nfp_flower_setup_tc,
+};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
new file mode 100644
index 000000000000..9e64c048e83f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_FLOWER_H__
+#define __NFP_FLOWER_H__ 1
+
+#include <linux/circ_buf.h>
+#include <linux/hashtable.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+struct tc_to_netdev;
+struct net_device;
+struct nfp_app;
+
+#define NFP_FL_STATS_ENTRY_RS BIT(20)
+#define NFP_FL_STATS_ELEM_RS 4
+#define NFP_FL_REPEATED_HASH_MAX BIT(17)
+#define NFP_FLOWER_HASH_BITS 19
+#define NFP_FLOWER_MASK_ENTRY_RS 256
+#define NFP_FLOWER_MASK_ELEMENT_RS 1
+#define NFP_FLOWER_MASK_HASH_BITS 10
+
+#define NFP_FL_META_FLAG_NEW_MASK 128
+#define NFP_FL_META_FLAG_LAST_MASK 1
+
+#define NFP_FL_MASK_REUSE_TIME_NS 40000
+#define NFP_FL_MASK_ID_LOCATION 1
+
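+/* Free mask ids are recycled through a circular buffer; @last_used
+ * records when each id was released so reuse can be delayed by
+ * NFP_FL_MASK_REUSE_TIME_NS (see metadata.c).
+ */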
+struct nfp_fl_mask_id {
+ struct circ_buf mask_id_free_list;
+ struct timespec64 *last_used;
+ u8 init_unallocated;
+};
+
+struct nfp_fl_stats_id {
+ struct circ_buf free_list;
+ u32 init_unalloc;
+ u8 repeated_em_count;
+};
+
+/**
+ * struct nfp_flower_priv - Flower APP priv data
+ * @nn: Pointer to vNIC
+ * @mask_id_seed: Seed used for mask hash table
+ * @flower_version: HW version of flower
+ * @stats_ids: List of free stats ids
+ * @mask_ids: List of free mask ids
+ * @mask_table: Hash table used to store masks
+ * @flow_table: Hash table used to store flower rules
+ */
+struct nfp_flower_priv {
+ struct nfp_net *nn;
+ u32 mask_id_seed;
+ u64 flower_version;
+ struct nfp_fl_stats_id stats_ids;
+ struct nfp_fl_mask_id mask_ids;
+ DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
+ DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
+};
+
+struct nfp_fl_key_ls {
+ u32 key_layer_two;
+ u8 key_layer;
+ int key_size;
+};
+
+struct nfp_fl_rule_metadata {
+ u8 key_len;
+ u8 mask_len;
+ u8 act_len;
+ u8 flags;
+ __be32 host_ctx_id;
+ __be64 host_cookie __packed;
+ __be64 flow_version __packed;
+ __be32 shortcut;
+};
+
+struct nfp_fl_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 used;
+};
+
+struct nfp_fl_payload {
+ struct nfp_fl_rule_metadata meta;
+ unsigned long tc_flower_cookie;
+ struct hlist_node link;
+ struct rcu_head rcu;
+ spinlock_t lock; /* lock stats */
+ struct nfp_fl_stats stats;
+ char *unmasked_data;
+ char *mask_data;
+ char *action_data;
+};
+
+struct nfp_fl_stats_frame {
+ __be32 stats_con_id;
+ __be32 pkt_count;
+ __be64 byte_count;
+ __be64 stats_cookie;
+};
+
+int nfp_flower_metadata_init(struct nfp_app *app);
+void nfp_flower_metadata_cleanup(struct nfp_app *app);
+
+int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ u32 handle, __be16 proto, struct tc_to_netdev *tc);
+int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+ struct nfp_fl_key_ls *key_ls,
+ struct net_device *netdev,
+ struct nfp_fl_payload *nfp_flow);
+int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
+ struct net_device *netdev,
+ struct nfp_fl_payload *nfp_flow);
+int nfp_compile_flow_metadata(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
+ struct nfp_fl_payload *nfp_flow);
+int nfp_modify_flow_metadata(struct nfp_app *app,
+ struct nfp_fl_payload *nfp_flow);
+
+struct nfp_fl_payload *
+nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
+struct nfp_fl_payload *
+nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
+
+void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
new file mode 100644
index 000000000000..0e08404480ef
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <net/pkt_cls.h>
+
+#include "cmsg.h"
+#include "main.h"
+
+static void
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
+ struct tc_cls_flower_offload *flow, u8 key_type,
+ bool mask_version)
+{
+ struct flow_dissector_key_vlan *flow_vlan;
+ u16 tmp_tci;
+
+ /* Populate the metadata frame. */
+ frame->nfp_flow_key_layer = key_type;
+ frame->mask_id = ~0;
+
+ if (mask_version) {
+ frame->tci = cpu_to_be16(~0);
+ return;
+ }
+
+ flow_vlan = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ flow->key);
+
+ /* Populate the tci field. */
+ if (!flow_vlan->vlan_id) {
+ tmp_tci = 0;
+ } else {
+ tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ flow_vlan->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ flow_vlan->vlan_id) |
+ NFP_FLOWER_MASK_VLAN_CFI;
+ }
+ frame->tci = cpu_to_be16(tmp_tci);
+}
+
+static void
+nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
+{
+ frame->nfp_flow_key_layer = key_type;
+ frame->mask_id = 0;
+ frame->reserved = 0;
+}
+
+static int
+nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
+ bool mask_version)
+{
+ if (mask_version) {
+ frame->in_port = cpu_to_be32(~0);
+ return 0;
+ }
+
+ frame->in_port = cpu_to_be32(cmsg_port);
+
+ return 0;
+}
+
+static void
+nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_eth_addrs *flow_mac;
+
+ flow_mac = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ target);
+
+ memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
+
+ /* Populate mac frame. */
+ ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]);
+ ether_addr_copy(frame->mac_src, &flow_mac->src[0]);
+
+ if (mask_version)
+ frame->mpls_lse = cpu_to_be32(~0);
+}
+
+static void
+nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ports *flow_tp;
+
+ flow_tp = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ target);
+
+ frame->port_src = flow_tp->src;
+ frame->port_dst = flow_tp->dst;
+}
+
+static void
+nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ipv4_addrs *flow_ipv4;
+ struct flow_dissector_key_basic *flow_basic;
+
+ flow_ipv4 = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ target);
+
+ flow_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target);
+
+ /* Populate IPv4 frame. */
+ frame->reserved = 0;
+ frame->ipv4_src = flow_ipv4->src;
+ frame->ipv4_dst = flow_ipv4->dst;
+ frame->proto = flow_basic->ip_proto;
+ /* Wildcard TOS/TTL for now. */
+ frame->tos = 0;
+ frame->ttl = 0;
+}
+
+static void
+nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ipv6_addrs *flow_ipv6;
+ struct flow_dissector_key_basic *flow_basic;
+
+ flow_ipv6 = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ target);
+
+ flow_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target);
+
+ /* Populate IPv6 frame. */
+ frame->reserved = 0;
+ frame->ipv6_src = flow_ipv6->src;
+ frame->ipv6_dst = flow_ipv6->dst;
+ frame->proto = flow_basic->ip_proto;
+ /* Wildcard LABEL/TOS/TTL for now. */
+ frame->ipv6_flow_label_exthdr = 0;
+ frame->tos = 0;
+ frame->ttl = 0;
+}
+
+int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+ struct nfp_fl_key_ls *key_ls,
+ struct net_device *netdev,
+ struct nfp_fl_payload *nfp_flow)
+{
+ int err;
+ u8 *ext;
+ u8 *msk;
+
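+ /* The match key is serialised twice, layer by layer: once with
+ * exact values (ext) and once with the mask (msk), in the order
+ * the firmware expects.
+ */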
+ memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
+ memset(nfp_flow->mask_data, 0, key_ls->key_size);
+
+ ext = nfp_flow->unmasked_data;
+ msk = nfp_flow->mask_data;
+ if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
+ /* Populate Exact Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
+ flow, key_ls->key_layer, false);
+ /* Populate Mask Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
+ flow, key_ls->key_layer, true);
+ ext += sizeof(struct nfp_flower_meta_two);
+ msk += sizeof(struct nfp_flower_meta_two);
+
+ /* Populate Exact Port data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
+ nfp_repr_get_port_id(netdev),
+ false);
+ if (err)
+ return err;
+
+ /* Populate Mask Port Data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+ nfp_repr_get_port_id(netdev),
+ true);
+ if (err)
+ return err;
+
+ ext += sizeof(struct nfp_flower_in_port);
+ msk += sizeof(struct nfp_flower_in_port);
+ } else {
+ /* Populate Exact Metadata. */
+ nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
+ key_ls->key_layer);
+ /* Populate Mask Metadata. */
+ nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
+ key_ls->key_layer);
+ ext += sizeof(struct nfp_flower_meta_one);
+ msk += sizeof(struct nfp_flower_meta_one);
+ }
+
+ if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
+ /* Additional Metadata Fields.
+ * Currently unsupported.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
+ /* Populate Exact MAC Data. */
+ nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
+ flow, false);
+ /* Populate Mask MAC Data. */
+ nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
+ flow, true);
+ ext += sizeof(struct nfp_flower_mac_mpls);
+ msk += sizeof(struct nfp_flower_mac_mpls);
+ }
+
+ if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
+ /* Populate Exact TP Data. */
+ nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
+ flow, false);
+ /* Populate Mask TP Data. */
+ nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
+ flow, true);
+ ext += sizeof(struct nfp_flower_tp_ports);
+ msk += sizeof(struct nfp_flower_tp_ports);
+ }
+
+ if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
+ /* Populate Exact IPv4 Data. */
+ nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
+ flow, false);
+ /* Populate Mask IPv4 Data. */
+ nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
+ flow, true);
+ ext += sizeof(struct nfp_flower_ipv4);
+ msk += sizeof(struct nfp_flower_ipv4);
+ }
+
+ if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
+ /* Populate Exact IPv6 Data. */
+ nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
+ flow, false);
+ /* Populate Mask IPv6 Data. */
+ nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
+ flow, true);
+ ext += sizeof(struct nfp_flower_ipv6);
+ msk += sizeof(struct nfp_flower_ipv6);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
new file mode 100644
index 000000000000..fec0ff2ca94f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/hash.h>
+#include <linux/hashtable.h>
+#include <linux/jhash.h>
+#include <linux/vmalloc.h>
+#include <net/pkt_cls.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_app.h"
+
+struct nfp_mask_id_table {
+ struct hlist_node link;
+ u32 hash_key;
+ u32 ref_cnt;
+ u8 mask_id;
+};
+
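+/* Stats context ids are managed as a circular buffer free list:
+ * released ids are written at the head and reallocated from the tail,
+ * after the pool of never-allocated ids has been handed out.
+ */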
+static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct circ_buf *ring;
+
+ ring = &priv->stats_ids.free_list;
+ /* Check if buffer is full. */
+ if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS *
+ NFP_FL_STATS_ELEM_RS -
+ NFP_FL_STATS_ELEM_RS + 1))
+ return -ENOBUFS;
+
+ memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
+ ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
+ (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+
+ return 0;
+}
+
+static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ u32 freed_stats_id, temp_stats_id;
+ struct circ_buf *ring;
+
+ ring = &priv->stats_ids.free_list;
+ freed_stats_id = NFP_FL_STATS_ENTRY_RS;
+ /* Check for unallocated entries first. */
+ if (priv->stats_ids.init_unalloc > 0) {
+ *stats_context_id = priv->stats_ids.init_unalloc - 1;
+ priv->stats_ids.init_unalloc--;
+ return 0;
+ }
+
+ /* Check if buffer is empty. */
+ if (ring->head == ring->tail) {
+ *stats_context_id = freed_stats_id;
+ return -ENOENT;
+ }
+
+ memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
+ *stats_context_id = temp_stats_id;
+ memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
+ ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
+ (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+
+ return 0;
+}
+
+/* Must be called with either RTNL or rcu_read_lock */
+struct nfp_fl_payload *
+nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload *flower_entry;
+
+ hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
+ tc_flower_cookie)
+ if (flower_entry->tc_flower_cookie == tc_flower_cookie)
+ return flower_entry;
+
+ return NULL;
+}
+
+static void
+nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
+{
+ struct nfp_fl_payload *nfp_flow;
+ unsigned long flower_cookie;
+
+ flower_cookie = be64_to_cpu(stats->stats_cookie);
+
+ rcu_read_lock();
+ nfp_flow = nfp_flower_search_fl_table(app, flower_cookie);
+ if (!nfp_flow)
+ goto exit_rcu_unlock;
+
+ if (nfp_flow->meta.host_ctx_id != stats->stats_con_id)
+ goto exit_rcu_unlock;
+
+ spin_lock(&nfp_flow->lock);
+ nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
+ nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
+ nfp_flow->stats.used = jiffies;
+ spin_unlock(&nfp_flow->lock);
+
+exit_rcu_unlock:
+ rcu_read_unlock();
+}
+
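+/* A stats control message carries an array of per-flow stats frames;
+ * walk and apply each one.
+ */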
+void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
+{
+ unsigned int msg_len = skb->len - NFP_FLOWER_CMSG_HLEN;
+ struct nfp_fl_stats_frame *stats_frame;
+ unsigned char *msg;
+ int i;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+
+ stats_frame = (struct nfp_fl_stats_frame *)msg;
+ for (i = 0; i < msg_len / sizeof(*stats_frame); i++)
+ nfp_flower_update_stats(app, stats_frame + i);
+}
+
+static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct circ_buf *ring;
+ struct timespec64 now;
+
+ ring = &priv->mask_ids.mask_id_free_list;
+ /* Checking if buffer is full. */
+ if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
+ return -ENOBUFS;
+
+ memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
+ ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
+ (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
+
+ getnstimeofday64(&now);
+ priv->mask_ids.last_used[mask_id] = now;
+
+ return 0;
+}
+
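+/* Allocate a mask id, handing out never-used ids first.  A recycled
+ * id is only reused once NFP_FL_MASK_REUSE_TIME_NS has passed since
+ * its release.
+ */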
+static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct timespec64 delta, now;
+ struct circ_buf *ring;
+ u8 temp_id, freed_id;
+
+ ring = &priv->mask_ids.mask_id_free_list;
+ freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
+ /* Checking for unallocated entries first. */
+ if (priv->mask_ids.init_unallocated > 0) {
+ *mask_id = priv->mask_ids.init_unallocated;
+ priv->mask_ids.init_unallocated--;
+ return 0;
+ }
+
+ /* Checking if buffer is empty. */
+ if (ring->head == ring->tail)
+ goto err_not_found;
+
+ memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
+ *mask_id = temp_id;
+
+ getnstimeofday64(&now);
+ delta = timespec64_sub(now, priv->mask_ids.last_used[*mask_id]);
+
+ if (timespec64_to_ns(&delta) < NFP_FL_MASK_REUSE_TIME_NS)
+ goto err_not_found;
+
+ memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
+ ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
+ (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
+
+ return 0;
+
+err_not_found:
+ *mask_id = freed_id;
+ return -ENOENT;
+}
+
+static int
+nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_mask_id_table *mask_entry;
+ unsigned long hash_key;
+ u8 mask_id;
+
+ if (nfp_mask_alloc(app, &mask_id))
+ return -ENOENT;
+
+ mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
+ if (!mask_entry) {
+ nfp_release_mask_id(app, mask_id);
+ return -ENOMEM;
+ }
+
+ INIT_HLIST_NODE(&mask_entry->link);
+ mask_entry->mask_id = mask_id;
+ hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
+ mask_entry->hash_key = hash_key;
+ mask_entry->ref_cnt = 1;
+ hash_add(priv->mask_table, &mask_entry->link, hash_key);
+
+ return mask_id;
+}
+
+static struct nfp_mask_id_table *
+nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_mask_id_table *mask_entry;
+ unsigned long hash_key;
+
+ hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
+
+ hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
+ if (mask_entry->hash_key == hash_key)
+ return mask_entry;
+
+ return NULL;
+}
+
+static int
+nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
+{
+ struct nfp_mask_id_table *mask_entry;
+
+ mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
+ if (!mask_entry)
+ return -ENOENT;
+
+ mask_entry->ref_cnt++;
+
+ /* Casting u8 to int for later use. */
+ return mask_entry->mask_id;
+}
+
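+/* Look up the mask in the mask table; if it is not already installed,
+ * allocate an id for it and flag the metadata so the firmware adds
+ * the new mask along with the flow.
+ */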
+static bool
+nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
+ u8 *meta_flags, u8 *mask_id)
+{
+ int id;
+
+ id = nfp_find_in_mask_table(app, mask_data, mask_len);
+ if (id < 0) {
+ id = nfp_add_mask_table(app, mask_data, mask_len);
+ if (id < 0)
+ return false;
+ *meta_flags |= NFP_FL_META_FLAG_NEW_MASK;
+ }
+ *mask_id = id;
+
+ return true;
+}
+
+static bool
+nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
+ u8 *meta_flags, u8 *mask_id)
+{
+ struct nfp_mask_id_table *mask_entry;
+
+ mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
+ if (!mask_entry)
+ return false;
+
+ *mask_id = mask_entry->mask_id;
+ mask_entry->ref_cnt--;
+ if (!mask_entry->ref_cnt) {
+ hash_del(&mask_entry->link);
+ nfp_release_mask_id(app, *mask_id);
+ kfree(mask_entry);
+ if (meta_flags)
+ *meta_flags |= NFP_FL_META_FLAG_LAST_MASK;
+ }
+
+ return true;
+}
+
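+/* Allocate a stats context and mask id for the flow and fill in its
+ * firmware metadata.  Fails with -EEXIST if the TC cookie is already
+ * present in the flow table, releasing the ids taken above.
+ */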
+int nfp_compile_flow_metadata(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
+ struct nfp_fl_payload *nfp_flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload *check_entry;
+ u8 new_mask_id;
+ u32 stats_cxt;
+
+ if (nfp_get_stats_entry(app, &stats_cxt))
+ return -ENOENT;
+
+ nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
+ nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+
+ new_mask_id = 0;
+ if (!nfp_check_mask_add(app, nfp_flow->mask_data,
+ nfp_flow->meta.mask_len,
+ &nfp_flow->meta.flags, &new_mask_id)) {
+ if (nfp_release_stats_entry(app, stats_cxt))
+ return -EINVAL;
+ return -ENOENT;
+ }
+
+ nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
+ priv->flower_version++;
+
+ /* Update flow payload with mask ids. */
+ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
+ nfp_flow->stats.pkts = 0;
+ nfp_flow->stats.bytes = 0;
+ nfp_flow->stats.used = jiffies;
+
+ check_entry = nfp_flower_search_fl_table(app, flow->cookie);
+ if (check_entry) {
+ if (nfp_release_stats_entry(app, stats_cxt))
+ return -EINVAL;
+
+ if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
+ nfp_flow->meta.mask_len,
+ NULL, &new_mask_id))
+ return -EINVAL;
+
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int nfp_modify_flow_metadata(struct nfp_app *app,
+ struct nfp_fl_payload *nfp_flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ u8 new_mask_id = 0;
+ u32 temp_ctx_id;
+
+ nfp_check_mask_remove(app, nfp_flow->mask_data,
+ nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
+ &new_mask_id);
+
+ nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
+ priv->flower_version++;
+
+ /* Update flow payload with mask ids. */
+ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
+
+ /* Release the stats ctx id. */
+ temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+
+ return nfp_release_stats_entry(app, temp_ctx_id);
+}
+
+int nfp_flower_metadata_init(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ hash_init(priv->mask_table);
+ hash_init(priv->flow_table);
+ get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
+
+ /* Init ring buffer and unallocated mask_ids. */
+ priv->mask_ids.mask_id_free_list.buf =
+ kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
+ NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
+ if (!priv->mask_ids.mask_id_free_list.buf)
+ return -ENOMEM;
+
+ priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
+
+ /* Init timestamps for mask ids. */
+ priv->mask_ids.last_used =
+ kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
+ sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
+ if (!priv->mask_ids.last_used)
+ goto err_free_mask_id;
+
+ /* Init ring buffer and unallocated stats_ids. */
+ priv->stats_ids.free_list.buf =
+ vmalloc(NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+ if (!priv->stats_ids.free_list.buf)
+ goto err_free_last_used;
+
+ priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
+
+ return 0;
+
+err_free_last_used:
+ kfree(priv->mask_ids.last_used);
+err_free_mask_id:
+ kfree(priv->mask_ids.mask_id_free_list.buf);
+ return -ENOMEM;
+}
+
+void nfp_flower_metadata_cleanup(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ if (!priv)
+ return;
+
+ kfree(priv->mask_ids.mask_id_free_list.buf);
+ kfree(priv->mask_ids.last_used);
+ vfree(priv->stats_ids.free_list.buf);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
new file mode 100644
index 000000000000..4ad10bd5e139
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <net/devlink.h>
+#include <net/pkt_cls.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nsp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_port.h"
+
+static int
+nfp_flower_xmit_flow(struct net_device *netdev,
+ struct nfp_fl_payload *nfp_flow, u8 mtype)
+{
+ u32 meta_len, key_len, mask_len, act_len, tot_len;
+ struct nfp_repr *priv = netdev_priv(netdev);
+ struct sk_buff *skb;
+ unsigned char *msg;
+
+ meta_len = sizeof(struct nfp_fl_rule_metadata);
+ key_len = nfp_flow->meta.key_len;
+ mask_len = nfp_flow->meta.mask_len;
+ act_len = nfp_flow->meta.act_len;
+
+ tot_len = meta_len + key_len + mask_len + act_len;
+
+ /* Convert to long words as firmware expects
+ * lengths in units of NFP_FL_LW_SIZ.
+ */
+ nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
+ nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
+ nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
+
+ skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ memcpy(msg, &nfp_flow->meta, meta_len);
+ memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
+ memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
+ memcpy(&msg[meta_len + key_len + mask_len],
+ nfp_flow->action_data, act_len);
+
+ /* Convert back to bytes as software expects
+ * lengths in units of bytes.
+ */
+ nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
+ nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
+ nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
+
+ nfp_ctrl_tx(priv->app->ctrl, skb);
+
+ return 0;
+}
+
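+/* Check whether the classifier matches on any field above the MAC
+ * layer.
+ */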
+static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
+{
+ return dissector_uses_key(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
+ dissector_uses_key(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
+ dissector_uses_key(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS) ||
+ dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
+}
+
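+/* Map the TC flower dissector onto NFP key layers and accumulate the
+ * total key size; returns -EOPNOTSUPP for matches the firmware cannot
+ * express.
+ */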
+static int
+nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
+ struct tc_cls_flower_offload *flow)
+{
+ struct flow_dissector_key_control *mask_enc_ctl;
+ struct flow_dissector_key_basic *mask_basic;
+ struct flow_dissector_key_basic *key_basic;
+ u32 key_layer_two;
+ u8 key_layer;
+ int key_size;
+
+ mask_enc_ctl = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ flow->mask);
+
+ mask_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->mask);
+
+ key_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ key_layer_two = 0;
+ key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
+ key_size = sizeof(struct nfp_flower_meta_one) +
+ sizeof(struct nfp_flower_in_port) +
+ sizeof(struct nfp_flower_mac_mpls);
+
+ /* If a tunnel match is requested, reject the offload for now. */
+ if (mask_enc_ctl->addr_type)
+ return -EOPNOTSUPP;
+
+ if (mask_basic->n_proto) {
+ /* Ethernet type is present in the key. */
+ switch (key_basic->n_proto) {
+ case cpu_to_be16(ETH_P_IP):
+ key_layer |= NFP_FLOWER_LAYER_IPV4;
+ key_size += sizeof(struct nfp_flower_ipv4);
+ break;
+
+ case cpu_to_be16(ETH_P_IPV6):
+ key_layer |= NFP_FLOWER_LAYER_IPV6;
+ key_size += sizeof(struct nfp_flower_ipv6);
+ break;
+
+ /* Currently we do not offload ARP
+ * because we rely on it to get to the host.
+ */
+ case cpu_to_be16(ETH_P_ARP):
+ return -EOPNOTSUPP;
+
+ /* Will be included in layer 2. */
+ case cpu_to_be16(ETH_P_8021Q):
+ break;
+
+ default:
+ /* Other ethtype - we need to check the masks for the
+ * remainder of the key to ensure we can offload.
+ */
+ if (nfp_flower_check_higher_than_mac(flow))
+ return -EOPNOTSUPP;
+ break;
+ }
+ }
+
+ if (mask_basic->ip_proto) {
+ /* IP protocol is present in the key. */
+ switch (key_basic->ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ key_layer |= NFP_FLOWER_LAYER_TP;
+ key_size += sizeof(struct nfp_flower_tp_ports);
+ break;
+ default:
+ /* Other ip proto - we need to check the masks for the
+ * remainder of the key to ensure we can offload.
+ */
+ return -EOPNOTSUPP;
+ }
+ }
+
+ ret_key_ls->key_layer = key_layer;
+ ret_key_ls->key_layer_two = key_layer_two;
+ ret_key_ls->key_size = key_size;
+
+ return 0;
+}
+
+static struct nfp_fl_payload *
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
+{
+ struct nfp_fl_payload *flow_pay;
+
+ flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
+ if (!flow_pay)
+ return NULL;
+
+ flow_pay->meta.key_len = key_layer->key_size;
+ flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
+ if (!flow_pay->unmasked_data)
+ goto err_free_flow;
+
+ flow_pay->meta.mask_len = key_layer->key_size;
+ flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
+ if (!flow_pay->mask_data)
+ goto err_free_unmasked;
+
+ flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
+ if (!flow_pay->action_data)
+ goto err_free_mask;
+
+ flow_pay->meta.flags = 0;
+ spin_lock_init(&flow_pay->lock);
+
+ return flow_pay;
+
+err_free_mask:
+ kfree(flow_pay->mask_data);
+err_free_unmasked:
+ kfree(flow_pay->unmasked_data);
+err_free_flow:
+ kfree(flow_pay);
+ return NULL;
+}
+
+/**
+ * nfp_flower_add_offload() - Adds a new flow to hardware.
+ * @app: Pointer to the APP handle
+ * @netdev: netdev structure.
+ * @flow: TC flower classifier offload structure.
+ *
+ * Compiles the flow's match and action payloads, offloads the flow
+ * to the firmware and adds it to the driver's flow hash table.
+ *
+ * Return: negative value on error, 0 if configured successfully.
+ */
+static int
+nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_flower_offload *flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload *flow_pay;
+ struct nfp_fl_key_ls *key_layer;
+ int err;
+
+ key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
+ if (!key_layer)
+ return -ENOMEM;
+
+ err = nfp_flower_calculate_key_layers(key_layer, flow);
+ if (err)
+ goto err_free_key_ls;
+
+ flow_pay = nfp_flower_allocate_new(key_layer);
+ if (!flow_pay) {
+ err = -ENOMEM;
+ goto err_free_key_ls;
+ }
+
+ err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
+ if (err)
+ goto err_destroy_flow;
+
+ err = nfp_flower_compile_action(flow, netdev, flow_pay);
+ if (err)
+ goto err_destroy_flow;
+
+ err = nfp_compile_flow_metadata(app, flow, flow_pay);
+ if (err)
+ goto err_destroy_flow;
+
+ err = nfp_flower_xmit_flow(netdev, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ if (err)
+ goto err_destroy_flow;
+
+ INIT_HLIST_NODE(&flow_pay->link);
+ flow_pay->tc_flower_cookie = flow->cookie;
+ hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
+
+ /* The key layer scratch info is no longer needed; the flow
+ * payload itself is freed when the flower rule is destroyed.
+ */
+ kfree(key_layer);
+
+ return 0;
+
+err_destroy_flow:
+ kfree(flow_pay->action_data);
+ kfree(flow_pay->mask_data);
+ kfree(flow_pay->unmasked_data);
+ kfree(flow_pay);
+err_free_key_ls:
+ kfree(key_layer);
+ return err;
+}
+
+/**
+ * nfp_flower_del_offload() - Removes a flow from hardware.
+ * @app: Pointer to the APP handle
+ * @netdev: netdev structure.
+ * @flow: TC flower classifier offload structure
+ *
+ * Removes a flow from the flow hash table and frees its match and
+ * action payloads after telling the firmware to delete it.
+ *
+ * Return: negative value on error, 0 if removed successfully.
+ */
+static int
+nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_flower_offload *flow)
+{
+ struct nfp_fl_payload *nfp_flow;
+ int err;
+
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+ if (!nfp_flow)
+ return -ENOENT;
+
+ err = nfp_modify_flow_metadata(app, nfp_flow);
+ if (err)
+ goto err_free_flow;
+
+ err = nfp_flower_xmit_flow(netdev, nfp_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+ if (err)
+ goto err_free_flow;
+
+err_free_flow:
+ hash_del_rcu(&nfp_flow->link);
+ kfree(nfp_flow->action_data);
+ kfree(nfp_flow->mask_data);
+ kfree(nfp_flow->unmasked_data);
+ kfree_rcu(nfp_flow, rcu);
+ return err;
+}
+
+/**
+ * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
+ * @app: Pointer to the APP handle
+ * @flow: TC flower classifier offload structure
+ *
+ * Populates a flow statistics structure which corresponds to a
+ * specific flow.
+ *
+ * Return: negative value on error, 0 if stats populated successfully.
+ */
+static int
+nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
+{
+ struct nfp_fl_payload *nfp_flow;
+
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+ if (!nfp_flow)
+ return -EINVAL;
+
+ spin_lock_bh(&nfp_flow->lock);
+ tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
+ nfp_flow->stats.pkts, nfp_flow->stats.used);
+
+ nfp_flow->stats.pkts = 0;
+ nfp_flow->stats.bytes = 0;
+ spin_unlock_bh(&nfp_flow->lock);
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_flower_offload *flower)
+{
+ switch (flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return nfp_flower_add_offload(app, netdev, flower);
+ case TC_CLSFLOWER_DESTROY:
+ return nfp_flower_del_offload(app, netdev, flower);
+ case TC_CLSFLOWER_STATS:
+ return nfp_flower_get_stats(app, flower);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ u32 handle, __be16 proto, struct tc_to_netdev *tc)
+{
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ return -EOPNOTSUPP;
+
+ if (!eth_proto_is_802_3(proto))
+ return -EOPNOTSUPP;
+
+ if (tc->type != TC_SETUP_CLSFLOWER)
+ return -EINVAL;
+
+ return nfp_flower_repr_offload(app, netdev, tc->cls_flower);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 396b93f54823..c704c022574f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -38,10 +38,14 @@
#include "nfpcore/nfp_nffw.h"
#include "nfp_app.h"
#include "nfp_main.h"
+#include "nfp_net_repr.h"
static const struct nfp_app_type *apps[] = {
&app_nic,
&app_bpf,
+#ifdef CONFIG_NFP_APP_FLOWER
+ &app_flower,
+#endif
};
const char *nfp_app_mip_name(struct nfp_app *app)
@@ -51,14 +55,15 @@ const char *nfp_app_mip_name(struct nfp_app *app)
return nfp_mip_name(app->pf->mip);
}
-struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size)
+struct sk_buff *
+nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority)
{
struct sk_buff *skb;
if (nfp_app_ctrl_has_meta(app))
size += 8;
- skb = alloc_skb(size, GFP_ATOMIC);
+ skb = alloc_skb(size, priority);
if (!skb)
return NULL;
@@ -68,6 +73,25 @@ struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size)
return skb;
}
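+/* Swap in a new representor set for @type and return the old one.
+ * Must be called with pf->lock held; returns ERR_PTR(-EBUSY) when
+ * trying to set reprs of a type which is already set.
+ */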
+struct nfp_reprs *
+nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
+ struct nfp_reprs *reprs)
+{
+ struct nfp_reprs *old;
+
+ old = rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
+ if (reprs && old) {
+ old = ERR_PTR(-EBUSY);
+ goto exit_unlock;
+ }
+
+ rcu_assign_pointer(app->reprs[type], reprs);
+
+exit_unlock:
+ return old;
+}
+
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
struct nfp_app *app;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index f5e373fa8c3b..5d714e10d9a9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -34,6 +34,10 @@
#ifndef _NFP_APP_H
#define _NFP_APP_H 1
+#include <net/devlink.h>
+
+#include "nfp_net_repr.h"
+
struct bpf_prog;
struct net_device;
struct pci_dev;
@@ -43,15 +47,18 @@ struct sk_buff;
struct nfp_app;
struct nfp_cpp;
struct nfp_pf;
+struct nfp_repr;
struct nfp_net;
enum nfp_app_id {
NFP_APP_CORE_NIC = 0x1,
NFP_APP_BPF_NIC = 0x2,
+ NFP_APP_FLOWER_NIC = 0x3,
};
extern const struct nfp_app_type app_nic;
extern const struct nfp_app_type app_bpf;
+extern const struct nfp_app_type app_flower;
/**
* struct nfp_app_type - application definition
@@ -60,16 +67,23 @@ extern const struct nfp_app_type app_bpf;
* @ctrl_has_meta: control messages have prepend of type:5/port:CTRL
*
* Callbacks
- * @init: perform basic app checks
+ * @init: perform basic app checks and init
+ * @clean: clean app state
* @extra_cap: extra capabilities string
* @vnic_init: init vNICs (assign port types, etc.)
* @vnic_clean: clean up app's vNIC state
+ * @repr_open: representor netdev open callback
+ * @repr_stop: representor netdev stop callback
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
* @xdp_offload: offload an XDP program
+ * @eswitch_mode_get: get SR-IOV eswitch mode
+ * @sriov_enable: app-specific sriov initialisation
+ * @sriov_disable: app-specific sriov clean-up
+ * @repr_get: get representor netdev
*/
struct nfp_app_type {
enum nfp_app_id id;
@@ -78,6 +92,7 @@ struct nfp_app_type {
bool ctrl_has_meta;
int (*init)(struct nfp_app *app);
+ void (*clean)(struct nfp_app *app);
const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
@@ -85,6 +100,9 @@ struct nfp_app_type {
unsigned int id);
void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
+ int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
+ int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
+
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
@@ -95,6 +113,12 @@ struct nfp_app_type {
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
+
+ int (*sriov_enable)(struct nfp_app *app, int num_vfs);
+ void (*sriov_disable)(struct nfp_app *app);
+
+ enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
+ struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
};
/**
@@ -103,7 +127,9 @@ struct nfp_app_type {
* @pf: backpointer to NFP PF structure
* @cpp: pointer to the CPP handle
* @ctrl: pointer to ctrl vNIC struct
+ * @reprs: array of pointers to representors
* @type: pointer to const application ops and info
+ * @priv: app-specific priv data
*/
struct nfp_app {
struct pci_dev *pdev;
@@ -111,8 +137,10 @@ struct nfp_app {
struct nfp_cpp *cpp;
struct nfp_net *ctrl;
+ struct nfp_reprs __rcu *reprs[NFP_REPR_TYPE_MAX + 1];
const struct nfp_app_type *type;
+ void *priv;
};
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
@@ -124,6 +152,12 @@ static inline int nfp_app_init(struct nfp_app *app)
return app->type->init(app);
}
+static inline void nfp_app_clean(struct nfp_app *app)
+{
+ if (app->type->clean)
+ app->type->clean(app);
+}
+
static inline int nfp_app_vnic_init(struct nfp_app *app, struct nfp_net *nn,
unsigned int id)
{
@@ -136,6 +170,20 @@ static inline void nfp_app_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
app->type->vnic_clean(app, nn);
}
+static inline int nfp_app_repr_open(struct nfp_app *app, struct nfp_repr *repr)
+{
+ if (!app->type->repr_open)
+ return -EINVAL;
+ return app->type->repr_open(app, repr);
+}
+
+static inline int nfp_app_repr_stop(struct nfp_app *app, struct nfp_repr *repr)
+{
+ if (!app->type->repr_stop)
+ return -EINVAL;
+ return app->type->repr_stop(app, repr);
+}
+
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;
@@ -216,8 +264,44 @@ static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
app->type->ctrl_msg_rx(app, skb);
}
+static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode)
+{
+ if (!app->type->eswitch_mode_get)
+ return -EOPNOTSUPP;
+
+ *mode = app->type->eswitch_mode_get(app);
+
+ return 0;
+}
+
+static inline int nfp_app_sriov_enable(struct nfp_app *app, int num_vfs)
+{
+ if (!app || !app->type->sriov_enable)
+ return -EOPNOTSUPP;
+ return app->type->sriov_enable(app, num_vfs);
+}
+
+static inline void nfp_app_sriov_disable(struct nfp_app *app)
+{
+ if (app && app->type->sriov_disable)
+ app->type->sriov_disable(app);
+}
+
+static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
+{
+ if (unlikely(!app || !app->type->repr_get))
+ return NULL;
+
+ return app->type->repr_get(app, id);
+}
+
+struct nfp_reprs *
+nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
+ struct nfp_reprs *reprs);
+
const char *nfp_app_mip_name(struct nfp_app *app);
-struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size);
+struct sk_buff *
+nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority);
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
void nfp_app_free(struct nfp_app *app);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
index 83c65e6291ee..c11a6c34e217 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
@@ -42,6 +42,8 @@ static int
nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
struct nfp_net *nn, unsigned int id)
{
+ int err;
+
if (!pf->eth_tbl)
return 0;
@@ -49,26 +51,13 @@ nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
if (IS_ERR(nn->port))
return PTR_ERR(nn->port);
- nn->port->eth_id = id;
- nn->port->eth_port = nfp_net_find_port(pf->eth_tbl, id);
-
- /* Check if vNIC has external port associated and cfg is OK */
- if (!nn->port->eth_port) {
- nfp_err(app->cpp,
- "NSP port entries don't match vNICs (no entry for port #%d)\n",
- id);
+ err = nfp_port_init_phy_port(pf, app, nn->port, id);
+ if (err) {
nfp_port_free(nn->port);
- return -EINVAL;
- }
- if (nn->port->eth_port->override_changed) {
- nfp_warn(app->cpp,
- "Config changed for port #%d, reboot required before port will be operational\n",
- id);
- nn->port->type = NFP_PORT_INVALID;
- return 1;
+ return err;
}
- return 0;
+ return nn->port->type == NFP_PORT_INVALID;
}
int nfp_app_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn,
@@ -80,7 +69,7 @@ int nfp_app_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn,
if (err)
return err < 0 ? err : 0;
- nfp_net_get_mac_addr(app->pf, nn, id);
+ nfp_net_get_mac_addr(app->pf, nn->port, id);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 2609a0f28e81..6c9f29c2e975 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -149,9 +149,27 @@ out:
return ret;
}
+static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+ int ret;
+
+ mutex_lock(&pf->lock);
+ if (!pf->app) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ret = nfp_app_eswitch_mode_get(pf->app, mode);
+out:
+ mutex_unlock(&pf->lock);
+
+ return ret;
+}
+
const struct devlink_ops nfp_devlink_ops = {
.port_split = nfp_devlink_port_split,
.port_unsplit = nfp_devlink_port_unsplit,
+ .eswitch_mode_get = nfp_devlink_eswitch_mode_get,
};
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 4e59dcb78c36..d67969d3e484 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -54,6 +54,7 @@
#include "nfpcore/nfp6000_pcie.h"
+#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
@@ -97,23 +98,41 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
struct nfp_pf *pf = pci_get_drvdata(pdev);
int err;
+ mutex_lock(&pf->lock);
+
if (num_vfs > pf->limit_vfs) {
nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
pf->limit_vfs);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_unlock;
}
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
- dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err);
- return err;
+ dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
+ goto err_unlock;
+ }
+
+ err = nfp_app_sriov_enable(pf->app, num_vfs);
+ if (err) {
+ dev_warn(&pdev->dev,
+ "App specific PCI SR-IOV configuration failed: %d\n",
+ err);
+ goto err_sriov_disable;
}
pf->num_vfs = num_vfs;
dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);
+ mutex_unlock(&pf->lock);
return num_vfs;
+
+err_sriov_disable:
+ pci_disable_sriov(pdev);
+err_unlock:
+ mutex_unlock(&pf->lock);
+ return err;
#endif
return 0;
}
@@ -123,19 +142,26 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ mutex_lock(&pf->lock);
+
/* If the VFs are assigned we cannot shut down SR-IOV without
* causing issues, so just leave the hardware available but
* disabled
*/
if (pci_vfs_assigned(pdev)) {
dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
+ mutex_unlock(&pf->lock);
return -EPERM;
}
+ nfp_app_sriov_disable(pf->app);
+
pf->num_vfs = 0;
pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");
+
+ mutex_unlock(&pf->lock);
#endif
return 0;
}
@@ -350,6 +376,12 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, pf);
pf->pdev = pdev;
+ pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
+ if (!pf->wq) {
+ err = -ENOMEM;
+ goto err_pci_priv_unset;
+ }
+
pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
if (IS_ERR_OR_NULL(pf->cpp)) {
err = PTR_ERR(pf->cpp);
@@ -382,6 +414,14 @@ static int nfp_pci_probe(struct pci_dev *pdev,
if (err)
goto err_fw_unload;
+ pf->num_vfs = pci_num_vf(pdev);
+ if (pf->num_vfs > pf->limit_vfs) {
+ dev_err(&pdev->dev,
+ "Error: %d VFs already enabled, but loaded FW can only support %d\n",
+ pf->num_vfs, pf->limit_vfs);
+ err = -EINVAL;
+ goto err_fw_unload;
+ }
+
err = nfp_net_pci_probe(pf);
if (err)
goto err_sriov_unlimit;
@@ -411,6 +451,8 @@ err_hwinfo_free:
kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
err_disable_msix:
+ destroy_workqueue(pf->wq);
+err_pci_priv_unset:
pci_set_drvdata(pdev, NULL);
mutex_destroy(&pf->lock);
devlink_free(devlink);
@@ -443,6 +485,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
if (pf->fw_loaded)
nfp_fw_unload(pf);
+ destroy_workqueue(pf->wq);
pci_set_drvdata(pdev, NULL);
kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 88724f8d0dcd..a08cfba7e68e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -58,6 +58,7 @@ struct nfp_hwinfo;
struct nfp_mip;
struct nfp_net;
struct nfp_nsp_identify;
+struct nfp_port;
struct nfp_rtsym_table;
/**
@@ -68,6 +69,10 @@ struct nfp_rtsym_table;
* @data_vnic_bar: Pointer to the CPP area for the data vNICs' BARs
* @ctrl_vnic_bar: Pointer to the CPP area for the ctrl vNIC's BAR
* @qc_area: Pointer to the CPP area for the queues
+ * @mac_stats_bar: Pointer to the CPP area for the MAC stats
+ * @mac_stats_mem: Pointer to mapped MAC stats area
+ * @vf_cfg_bar: Pointer to the CPP area for the VF configuration BAR
+ * @vf_cfg_mem: Pointer to mapped VF configuration area
* @irq_entries: Array of MSI-X entries for all vNICs
* @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit)
* @num_vfs: Number of SR-IOV VFs enabled
@@ -84,6 +89,7 @@ struct nfp_rtsym_table;
* @num_vnics: Number of vNICs spawned
* @vnics: Linked list of vNIC structures (struct nfp_net)
* @ports: Linked list of port structures (struct nfp_port)
+ * @wq: Workqueue for running works which need to grab @lock
* @port_refresh_work: Work entry for taking netdevs out
* @lock: Protects all fields which may change after probe
*/
@@ -97,6 +103,10 @@ struct nfp_pf {
struct nfp_cpp_area *data_vnic_bar;
struct nfp_cpp_area *ctrl_vnic_bar;
struct nfp_cpp_area *qc_area;
+ struct nfp_cpp_area *mac_stats_bar;
+ u8 __iomem *mac_stats_mem;
+ struct nfp_cpp_area *vf_cfg_bar;
+ u8 __iomem *vf_cfg_mem;
struct msix_entry *irq_entries;
@@ -122,7 +132,10 @@ struct nfp_pf {
struct list_head vnics;
struct list_head ports;
+
+ struct workqueue_struct *wq;
struct work_struct port_refresh_work;
+
struct mutex lock;
};
@@ -139,7 +152,7 @@ void nfp_hwmon_unregister(struct nfp_pf *pf);
struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id);
void
-nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id);
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port, unsigned int id);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 02fd8d4e253c..b1fa77bd708b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -318,6 +318,7 @@ struct nfp_meta_parsed {
u8 csum_type;
u32 hash;
u32 mark;
+ u32 portid;
__wsum csum;
};
@@ -541,6 +542,8 @@ struct nfp_net_dp {
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
+ * @xdp_flags: Flags with which XDP prog was loaded
+ * @xdp_prog: XDP prog (for ctrl path, both DRV and HW modes)
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
@@ -590,6 +593,9 @@ struct nfp_net {
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
+ u32 xdp_flags;
+ struct bpf_prog *xdp_prog;
+
unsigned int max_tx_rings;
unsigned int max_rx_rings;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 378512dec80d..30f82b41d400 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -64,6 +64,7 @@
#include <linux/vmalloc.h>
#include <linux/ktime.h>
+#include <net/switchdev.h>
#include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h"
@@ -280,6 +281,30 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
return ret;
}
+/**
+ * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
+ * @nn: NFP Net device to reconfigure
+ * @mbox_cmd: The value for the mailbox command
+ *
+ * Helper function for mailbox updates
+ *
+ * Return: Negative errno on error, 0 on success
+ */
+static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+{
+ int ret;
+
+ nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);
+
+ ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
+ if (ret) {
+ nn_err(nn, "Mailbox update error\n");
+ return ret;
+ }
+
+ return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
+}
+
/* Interrupt configuration and handling
*/
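A minimal sketch of how a caller drives this mailbox, mirroring the VLAN filter callbacks added later in this patch (the function name here is illustrative, not part of the driver):

static int nfp_net_mbox_vlan_add_sketch(struct nfp_net *nn, u16 vid)
{
        /* Arguments go into the mailbox value area first ... */
        nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
        nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);

        /* ... then the command is written and the firmware is kicked via
         * NFP_NET_CFG_UPDATE_MBOX; the helper above negates the firmware
         * return code into a -errno.
         */
        return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
}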
@@ -731,6 +756,26 @@ static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
tx_ring->wr_ptr_add = 0;
}
+static int nfp_net_prep_port_id(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ unsigned char *data;
+
+ if (likely(!md_dst))
+ return 0;
+ if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
+ return 0;
+
+ if (unlikely(skb_cow_head(skb, 8)))
+ return -ENOMEM;
+
+ data = skb_push(skb, 8);
+ put_unaligned_be32(NFP_NET_META_PORTID, data);
+ put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
+
+ return 8;
+}
+
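For reference, the 8 bytes pushed by nfp_net_prep_port_id() lay out as below (a sketch of the wire format implied by the code, not a struct the driver defines):

/*
 *  byte:  0       4       8
 *         +-------+-------+---------------------------
 *         | type  | port  | original Ethernet frame ...
 *         +-------+-------+---------------------------
 *  type = NFP_NET_META_PORTID, port = md_dst->u.port_info.port_id,
 *  both big-endian; the length (8) is returned as md_bytes and folded
 *  into txd->offset_eop below.
 */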
/**
* nfp_net_tx() - Main transmit entry point
* @skb: SKB to transmit
@@ -743,6 +788,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
const struct skb_frag_struct *frag;
struct nfp_net_tx_desc *txd, txdg;
+ int f, nr_frags, wr_idx, md_bytes;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_r_vector *r_vec;
struct nfp_net_tx_buf *txbuf;
@@ -750,8 +796,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
- int f, nr_frags;
- int wr_idx;
u16 qidx;
dp = &nn->dp;
@@ -773,6 +817,13 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
+ md_bytes = nfp_net_prep_port_id(skb);
+ if (unlikely(md_bytes < 0)) {
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
/* Start with the head skbuf */
dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
@@ -791,7 +842,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
/* Build TX descriptor */
txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
+ txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
txd->dma_len = cpu_to_le16(skb_headlen(skb));
nfp_desc_set_dma_addr(txd, dma_addr);
txd->data_len = cpu_to_le16(skb->len);
@@ -831,7 +882,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
*txd = txdg;
txd->dma_len = cpu_to_le16(fsize);
nfp_desc_set_dma_addr(txd, dma_addr);
- txd->offset_eop =
+ txd->offset_eop |=
(f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
}
@@ -1426,6 +1477,10 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
meta->mark = get_unaligned_be32(data);
data += 4;
break;
+ case NFP_NET_META_PORTID:
+ meta->portid = get_unaligned_be32(data);
+ data += 4;
+ break;
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
@@ -1570,6 +1625,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
struct nfp_meta_parsed meta;
+ struct net_device *netdev;
dma_addr_t new_dma_addr;
void *new_frag;
@@ -1648,7 +1704,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
- dp->bpf_offload_xdp)) {
+ dp->bpf_offload_xdp) && !meta.portid) {
unsigned int dma_off;
void *hard_start;
int act;
@@ -1694,6 +1750,20 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_repr_get(nn->app, meta.portid);
+ if (unlikely(!netdev)) {
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ continue;
+ }
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
@@ -1705,7 +1775,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb_set_hash(skb, meta.hash, meta.hash_type);
skb_record_rx_queue(skb, rx_ring->idx);
- skb->protocol = eth_type_trans(skb, dp->netdev);
+ skb->protocol = eth_type_trans(skb, netdev);
nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
@@ -2960,6 +3030,40 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
return nfp_net_ring_reconfig(nn, dp, NULL);
}
+static int
+nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* Priority-tagged packets with VLAN ID 0 are processed by the
+ * NFP as untagged packets
+ */
+ if (!vid)
+ return 0;
+
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+
+ return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
+}
+
+static int
+nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* Priority-tagged packets with VLAN ID 0 are processed by the
+ * NFP as untagged packets
+ */
+ if (!vid)
+ return 0;
+
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+
+ return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
+}
+
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -2993,18 +3097,6 @@ static void nfp_net_stat64(struct net_device *netdev,
}
}
-static int
-nfp_net_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index,
- __be16 proto, struct tc_to_netdev *tc)
-{
- struct nfp_net *nn = netdev_priv(netdev);
-
- if (chain_index)
- return -EOPNOTSUPP;
-
- return nfp_app_setup_tc(nn->app, netdev, handle, proto, tc);
-}
-
static int nfp_net_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -3053,6 +3145,13 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
}
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
+ else
+ new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
+ }
+
if (changed & NETIF_F_SG) {
if (features & NETIF_F_SG)
new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
@@ -3209,19 +3308,14 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev,
nfp_net_set_vxlan_port(nn, idx, 0);
}
-static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_xdp *xdp)
+static int
+nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
- struct bpf_prog *old_prog = nn->dp.xdp_prog;
- struct bpf_prog *prog = xdp->prog;
struct nfp_net_dp *dp;
- int err;
- if (!prog && !nn->dp.xdp_prog)
- return 0;
- if (prog && nn->dp.xdp_prog) {
- prog = xchg(&nn->dp.xdp_prog, prog);
- bpf_prog_put(prog);
- nfp_app_xdp_offload(nn->app, nn, nn->dp.xdp_prog);
+ if (!prog == !nn->dp.xdp_prog) {
+ WRITE_ONCE(nn->dp.xdp_prog, prog);
return 0;
}
@@ -3235,14 +3329,37 @@ static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_xdp *xdp)
dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
- err = nfp_net_ring_reconfig(nn, dp, xdp->extack);
+ return nfp_net_ring_reconfig(nn, dp, extack);
+}
+
+static int
+nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
+ struct netlink_ext_ack *extack)
+{
+ struct bpf_prog *drv_prog, *offload_prog;
+ int err;
+
+ if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES)
+ return -EBUSY;
+
+ /* Load both when no mode flags are set, to allow easy activation of the
+ * driver path when the program is replaced by one which can't be offloaded.
+ */
+ drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog;
+ offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog;
+
+ err = nfp_net_xdp_setup_drv(nn, drv_prog, extack);
if (err)
return err;
- if (old_prog)
- bpf_prog_put(old_prog);
+ err = nfp_app_xdp_offload(nn->app, nn, offload_prog);
+ if (err && flags & XDP_FLAGS_HW_MODE)
+ return err;
- nfp_app_xdp_offload(nn->app, nn, nn->dp.xdp_prog);
+ if (nn->xdp_prog)
+ bpf_prog_put(nn->xdp_prog);
+ nn->xdp_prog = prog;
+ nn->xdp_flags = flags;
return 0;
}
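Summarizing the flag handling above (a hedged reading of the code, not driver source):

/*
 *  attach flags            drv_prog    offload_prog
 *  ------------            --------    ------------
 *  none                    prog        prog   (offload errors ignored)
 *  XDP_FLAGS_DRV_MODE      prog        NULL
 *  XDP_FLAGS_HW_MODE       NULL        prog   (offload errors are fatal)
 */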
@@ -3253,10 +3370,14 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
- return nfp_net_xdp_setup(nn, xdp);
+ case XDP_SETUP_PROG_HW:
+ return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags,
+ xdp->extack);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nn->dp.xdp_prog;
- xdp->prog_id = nn->dp.xdp_prog ? nn->dp.xdp_prog->aux->id : 0;
+ xdp->prog_attached = !!nn->xdp_prog;
+ if (nn->dp.bpf_offload_xdp)
+ xdp->prog_attached = XDP_ATTACHED_HW;
+ xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
return 0;
default:
return -EINVAL;
@@ -3289,7 +3410,9 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
- .ndo_setup_tc = nfp_net_setup_tc,
+ .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
+ .ndo_setup_tc = nfp_port_setup_tc,
.ndo_tx_timeout = nfp_net_tx_timeout,
.ndo_set_rx_mode = nfp_net_set_rx_mode,
.ndo_change_mtu = nfp_net_change_mtu,
@@ -3316,7 +3439,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3331,6 +3454,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
@@ -3411,6 +3535,9 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
*/
void nfp_net_free(struct nfp_net *nn)
{
+ if (nn->xdp_prog)
+ bpf_prog_put(nn->xdp_prog);
+
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -3547,6 +3674,10 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
}
}
+ if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
+ }
netdev->features = netdev->hw_features;
@@ -3561,6 +3692,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->netdev_ops = &nfp_net_netdev_ops;
netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+ SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
+
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
@@ -3664,7 +3797,4 @@ void nfp_net_clean(struct nfp_net *nn)
return;
unregister_netdev(nn->dp.netdev);
-
- if (nn->dp.xdp_prog)
- bpf_prog_put(nn->dp.xdp_prog);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 48a8bf97645e..e5e94e0746ec 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -124,6 +124,7 @@
#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */
#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */
#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
+#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
@@ -162,6 +163,7 @@
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
#define NFP_NET_CFG_UPDATE_BPF (0x1 << 10) /* BPF program load */
#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
+#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -400,4 +402,29 @@
#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
((_x) * 0x10))
+/**
+ * General use mailbox area (0x1800 - 0x19ff)
+ * 4B for the update command and 4B for the return code,
+ * followed by a variable-length value of up to 504B
+ */
+#define NFP_NET_CFG_MBOX_CMD 0x1800
+#define NFP_NET_CFG_MBOX_RET 0x1804
+#define NFP_NET_CFG_MBOX_VAL 0x1808
+#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
+
+#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
+#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
+
+/**
+ * VLAN filtering using general use mailbox
+ * @NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
+ * @NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
+ * @NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
+ * @NFP_NET_CFG_VLAN_FILTER_SZ: Size of the VLAN filter mailbox in bytes
+ */
+#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL
+#define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER
+#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
+#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
+
#endif /* _NFP_NET_CTRL_H_ */
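A purely illustrative view of the mailbox layout these offsets describe; the driver accesses the fields with nn_writew()/nn_readl() rather than through a struct:

struct nfp_net_mbox_sketch {
        u32 cmd;                                /* 0x1800 MBOX_CMD */
        u32 ret;                                /* 0x1804 MBOX_RET */
        u8  val[NFP_NET_CFG_MBOX_VAL_MAX_SZ];   /* 0x1808, up to 504B */
};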
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index bc2bc0886176..c85a2f18c4df 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -81,79 +81,26 @@ static int nfp_is_ready(struct nfp_pf *pf)
}
/**
- * nfp_net_map_area() - Help function to map an area
- * @cpp: NFP CPP handler
- * @name: Name for the area
- * @target: CPP target
- * @addr: CPP address
- * @size: Size of the area
- * @area: Area handle (returned).
- *
- * This function is primarily to simplify the code in the main probe
- * function. To undo the effect of this functions call
- * @nfp_cpp_area_release_free(*area);
- *
- * Return: Pointer to memory mapped area or ERR_PTR
- */
-static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
- const char *name, int isl, int target,
- unsigned long long addr, unsigned long size,
- struct nfp_cpp_area **area)
-{
- u8 __iomem *res;
- u32 dest;
- int err;
-
- dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);
-
- *area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
- if (!*area) {
- err = -EIO;
- goto err_area;
- }
-
- err = nfp_cpp_area_acquire(*area);
- if (err < 0)
- goto err_acquire;
-
- res = nfp_cpp_area_iomem(*area);
- if (!res) {
- err = -EIO;
- goto err_map;
- }
-
- return res;
-
-err_map:
- nfp_cpp_area_release(*area);
-err_acquire:
- nfp_cpp_area_free(*area);
-err_area:
- return (u8 __iomem *)ERR_PTR(err);
-}
-
-/**
* nfp_net_get_mac_addr() - Get the MAC address.
* @pf: NFP PF handle
- * @nn: NFP Network structure
+ * @port: NFP port structure
* @id: NFP port id
*
* First try to get the MAC address from NSP ETH table. If that
* fails try HWInfo. As a last resort generate a random address.
*/
void
-nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port, unsigned int id)
{
struct nfp_eth_table_port *eth_port;
- struct nfp_net_dp *dp = &nn->dp;
u8 mac_addr[ETH_ALEN];
const char *mac_str;
char name[32];
- eth_port = __nfp_port_get_eth_port(nn->port);
+ eth_port = __nfp_port_get_eth_port(port);
if (eth_port) {
- ether_addr_copy(dp->netdev->dev_addr, eth_port->mac_addr);
- ether_addr_copy(dp->netdev->perm_addr, eth_port->mac_addr);
+ ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr);
+ ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr);
return;
}
@@ -161,22 +108,22 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
mac_str = nfp_hwinfo_lookup(pf->hwinfo, name);
if (!mac_str) {
- dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
- eth_hw_addr_random(dp->netdev);
+ nfp_warn(pf->cpp, "Can't lookup MAC address. Generate\n");
+ eth_hw_addr_random(port->netdev);
return;
}
if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
&mac_addr[0], &mac_addr[1], &mac_addr[2],
&mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
- dev_warn(dp->dev,
- "Can't parse MAC address (%s). Generate.\n", mac_str);
- eth_hw_addr_random(dp->netdev);
+ nfp_warn(pf->cpp, "Can't parse MAC address (%s). Generate.\n",
+ mac_str);
+ eth_hw_addr_random(port->netdev);
return;
}
- ether_addr_copy(dp->netdev->dev_addr, mac_addr);
- ether_addr_copy(dp->netdev->perm_addr, mac_addr);
+ ether_addr_copy(port->netdev->dev_addr, mac_addr);
+ ether_addr_copy(port->netdev->perm_addr, mac_addr);
}
struct nfp_eth_table_port *
@@ -227,33 +174,12 @@ static u8 __iomem *
nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int min_size, struct nfp_cpp_area **area)
{
- const struct nfp_rtsym *sym;
char pf_symbol[256];
- u8 __iomem *mem;
snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
nfp_cppcore_pcie_unit(pf->cpp));
- sym = nfp_rtsym_lookup(pf->rtbl, pf_symbol);
- if (!sym) {
- nfp_err(pf->cpp, "Failed to find PF symbol %s\n", pf_symbol);
- return (u8 __iomem *)ERR_PTR(-ENOENT);
- }
-
- if (sym->size < min_size) {
- nfp_err(pf->cpp, "PF symbol %s too small\n", pf_symbol);
- return (u8 __iomem *)ERR_PTR(-EINVAL);
- }
-
- mem = nfp_net_map_area(pf->cpp, name, sym->domain, sym->target,
- sym->addr, sym->size, area);
- if (IS_ERR(mem)) {
- nfp_err(pf->cpp, "Failed to map PF symbol %s: %ld\n",
- pf_symbol, PTR_ERR(mem));
- return mem;
- }
-
- return mem;
+ return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
}
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
@@ -486,8 +412,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
NFP_PF_CSR_SLICE_SIZE,
&pf->ctrl_vnic_bar);
if (IS_ERR(ctrl_bar)) {
+ nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
err = PTR_ERR(ctrl_bar);
- goto err_free;
+ goto err_app_clean;
}
pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
@@ -501,8 +428,11 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
+err_app_clean:
+ nfp_app_clean(pf->app);
err_free:
nfp_app_free(pf->app);
+ pf->app = NULL;
return err;
}
@@ -512,6 +442,7 @@ static void nfp_net_pf_app_clean(struct nfp_pf *pf)
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
+ nfp_app_clean(pf->app);
nfp_app_free(pf->app);
pf->app = NULL;
}
@@ -557,8 +488,16 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
if (err)
goto err_ctrl_stop;
+ if (pf->num_vfs) {
+ err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
+ if (err)
+ goto err_app_stop;
+ }
+
return 0;
+err_app_stop:
+ nfp_app_stop(pf->app);
err_ctrl_stop:
nfp_net_pf_app_stop_ctrl(pf);
return err;
@@ -566,10 +505,82 @@ err_ctrl_stop:
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
+ if (pf->num_vfs)
+ nfp_app_sriov_disable(pf->app);
nfp_app_stop(pf->app);
nfp_net_pf_app_stop_ctrl(pf);
}
+static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
+{
+ if (pf->vf_cfg_bar)
+ nfp_cpp_area_release_free(pf->vf_cfg_bar);
+ if (pf->mac_stats_bar)
+ nfp_cpp_area_release_free(pf->mac_stats_bar);
+ nfp_cpp_area_release_free(pf->qc_area);
+ nfp_cpp_area_release_free(pf->data_vnic_bar);
+}
+
+static int nfp_net_pci_map_mem(struct nfp_pf *pf)
+{
+ u8 __iomem *mem;
+ u32 min_size;
+ int err;
+
+ min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
+ mem = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
+ min_size, &pf->data_vnic_bar);
+ if (IS_ERR(mem)) {
+ nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
+ return PTR_ERR(mem);
+ }
+
+ min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
+ pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
+ "net.macstats", min_size,
+ &pf->mac_stats_bar);
+ if (IS_ERR(pf->mac_stats_mem)) {
+ if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
+ err = PTR_ERR(pf->mac_stats_mem);
+ goto err_unmap_ctrl;
+ }
+ pf->mac_stats_mem = NULL;
+ }
+
+ pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
+ "_pf%d_net_vf_bar",
+ NFP_NET_CFG_BAR_SZ *
+ pf->limit_vfs, &pf->vf_cfg_bar);
+ if (IS_ERR(pf->vf_cfg_mem)) {
+ if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
+ err = PTR_ERR(pf->vf_cfg_mem);
+ goto err_unmap_mac_stats;
+ }
+ pf->vf_cfg_mem = NULL;
+ }
+
+ mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0,
+ NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
+ &pf->qc_area);
+ if (IS_ERR(mem)) {
+ nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
+ err = PTR_ERR(mem);
+ goto err_unmap_vf_cfg;
+ }
+
+ return 0;
+
+err_unmap_vf_cfg:
+ if (pf->vf_cfg_bar)
+ nfp_cpp_area_release_free(pf->vf_cfg_bar);
+err_unmap_mac_stats:
+ if (pf->mac_stats_bar)
+ nfp_cpp_area_release_free(pf->mac_stats_bar);
+err_unmap_ctrl:
+ nfp_cpp_area_release_free(pf->data_vnic_bar);
+ return err;
+}
+
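The two IS_ERR() blocks above implement an "optional symbol" pattern: -ENOENT means the firmware simply does not expose the feature and the pointer is cleared, while any other error aborts the probe. A condensed, hypothetical helper showing the same logic:

static u8 __iomem *
nfp_map_optional_sketch(struct nfp_pf *pf, const char *sym, const char *name,
                        unsigned int min_size, struct nfp_cpp_area **bar)
{
        u8 __iomem *mem;

        mem = nfp_rtsym_map(pf->rtbl, sym, name, min_size, bar);
        if (IS_ERR(mem) && PTR_ERR(mem) == -ENOENT)
                return NULL;    /* feature absent, not a probe failure */
        return mem;             /* valid mapping, or a fatal ERR_PTR() */
}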
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
nfp_net_pf_app_stop(pf);
@@ -577,11 +588,8 @@ static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
nfp_net_debugfs_dir_clean(&pf->ddir);
nfp_net_pf_free_irqs(pf);
-
nfp_net_pf_app_clean(pf);
-
- nfp_cpp_area_release_free(pf->qc_area);
- nfp_cpp_area_release_free(pf->data_vnic_bar);
+ nfp_net_pci_unmap_mem(pf);
}
static int
@@ -674,7 +682,7 @@ void nfp_net_refresh_port_table(struct nfp_port *port)
set_bit(NFP_PORT_CHANGED, &port->flags);
- schedule_work(&pf->port_refresh_work);
+ queue_work(pf->wq, &pf->port_refresh_work);
}
int nfp_net_refresh_eth_port(struct nfp_port *port)
@@ -706,7 +714,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
{
struct nfp_net_fw_version fw_ver;
u8 __iomem *ctrl_bar, *qc_bar;
- u32 ctrl_bar_sz;
int stride;
int err;
@@ -718,6 +725,12 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
return -EINVAL;
}
+ if (!pf->rtbl) {
+ nfp_err(pf->cpp, "No %s, giving up.\n",
+ pf->fw_loaded ? "symbol table" : "firmware found");
+ return -EPROBE_DEFER;
+ }
+
mutex_lock(&pf->lock);
pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
if ((int)pf->max_data_vnics < 0) {
@@ -725,14 +738,15 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
goto err_unlock;
}
- ctrl_bar_sz = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
- ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
- ctrl_bar_sz, &pf->data_vnic_bar);
- if (IS_ERR(ctrl_bar)) {
- err = PTR_ERR(ctrl_bar);
- if (!pf->fw_loaded && err == -ENOENT)
- err = -EPROBE_DEFER;
+ err = nfp_net_pci_map_mem(pf);
+ if (err)
goto err_unlock;
+
+ ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
+ qc_bar = nfp_cpp_area_iomem(pf->qc_area);
+ if (!ctrl_bar || !qc_bar) {
+ err = -EIO;
+ goto err_unmap;
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
@@ -740,7 +754,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
err = -EINVAL;
- goto err_ctrl_unmap;
+ goto err_unmap;
}
/* Determine stride */
@@ -757,23 +771,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
fw_ver.resv, fw_ver.class,
fw_ver.major, fw_ver.minor);
err = -EINVAL;
- goto err_ctrl_unmap;
+ goto err_unmap;
}
}
- /* Map queues */
- qc_bar = nfp_net_map_area(pf->cpp, "net.qc", 0, 0,
- NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
- &pf->qc_area);
- if (IS_ERR(qc_bar)) {
- nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
- err = PTR_ERR(qc_bar);
- goto err_ctrl_unmap;
- }
-
err = nfp_net_pf_app_init(pf, qc_bar, stride);
if (err)
- goto err_unmap_qc;
+ goto err_unmap;
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
@@ -807,10 +811,8 @@ err_free_vnics:
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
nfp_net_pf_app_clean(pf);
-err_unmap_qc:
- nfp_cpp_area_release_free(pf->qc_area);
-err_ctrl_unmap:
- nfp_cpp_area_release_free(pf->data_vnic_bar);
+err_unmap:
+ nfp_net_pci_unmap_mem(pf);
err_unlock:
mutex_unlock(&pf->lock);
cancel_work_sync(&pf->port_refresh_work);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
new file mode 100644
index 000000000000..8ec5474f4b18
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/lockdep.h>
+#include <net/dst_metadata.h>
+#include <net/switchdev.h>
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_net_ctrl.h"
+#include "nfp_net_repr.h"
+#include "nfp_port.h"
+
+static void
+nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
+ int tx_status)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_repr_pcpu_stats *stats;
+
+ if (unlikely(tx_status != NET_XMIT_SUCCESS &&
+ tx_status != NET_XMIT_CN)) {
+ this_cpu_inc(repr->stats->tx_drops);
+ return;
+ }
+
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+}
+
+void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_repr_pcpu_stats *stats;
+
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+}
+
+static void
+nfp_repr_phy_port_get_stats64(const struct nfp_app *app, u8 phy_port,
+ struct rtnl_link_stats64 *stats)
+{
+ u8 __iomem *mem;
+
+ mem = app->pf->mac_stats_mem + phy_port * NFP_MAC_STATS_SIZE;
+
+ /* TX and RX stats are flipped as we are returning the stats as seen
+ * at the switch port corresponding to the phys port.
+ */
+ stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
+ stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
+ stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
+
+ stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
+ stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
+ stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
+}
+
+static void
+nfp_repr_vf_get_stats64(const struct nfp_app *app, u8 vf,
+ struct rtnl_link_stats64 *stats)
+{
+ u8 __iomem *mem;
+
+ mem = app->pf->vf_cfg_mem + vf * NFP_NET_CFG_BAR_SZ;
+
+ /* TX and RX stats are flipped as we are returning the stats as seen
+ * at the switch port corresponding to the VF.
+ */
+ stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES);
+ stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS);
+ stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS);
+
+ stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES);
+ stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS);
+ stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
+}
+
+static void
+nfp_repr_pf_get_stats64(const struct nfp_app *app, u8 pf,
+ struct rtnl_link_stats64 *stats)
+{
+ u8 __iomem *mem;
+
+ if (pf)
+ return;
+
+ mem = nfp_cpp_area_iomem(app->pf->data_vnic_bar);
+
+ stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES);
+ stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS);
+ stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS);
+
+ stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES);
+ stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS);
+ stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
+}
+
+static void
+nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_app *app = repr->app;
+
+ if (WARN_ON(!repr->port))
+ return;
+
+ switch (repr->port->type) {
+ case NFP_PORT_PHYS_PORT:
+ eth_port = __nfp_port_get_eth_port(repr->port);
+ if (!eth_port)
+ break;
+ nfp_repr_phy_port_get_stats64(app, eth_port->index, stats);
+ break;
+ case NFP_PORT_PF_PORT:
+ nfp_repr_pf_get_stats64(app, repr->port->pf_id, stats);
+ break;
+ case NFP_PORT_VF_PORT:
+ nfp_repr_vf_get_stats64(app, repr->port->vf_id, stats);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool
+nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return true;
+ }
+
+ return false;
+}
+
+static int
+nfp_repr_get_host_stats64(const struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 tbytes, tpkts, tdrops, rbytes, rpkts;
+ struct nfp_repr_pcpu_stats *repr_stats;
+ unsigned int start;
+
+ repr_stats = per_cpu_ptr(repr->stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
+ tbytes = repr_stats->tx_bytes;
+ tpkts = repr_stats->tx_packets;
+ tdrops = repr_stats->tx_drops;
+ rbytes = repr_stats->rx_bytes;
+ rpkts = repr_stats->rx_packets;
+ } while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));
+
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpkts;
+ stats->tx_dropped += tdrops;
+ stats->rx_bytes += rbytes;
+ stats->rx_packets += rpkts;
+ }
+
+ return 0;
+}
+
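A hedged note on the loop above: this is the standard u64_stats_sync reader. On 64-bit kernels the begin/retry pair compiles down to plain loads; on 32-bit kernels it protects against torn reads of the 64-bit counters updated by nfp_repr_inc_rx_stats() and nfp_repr_inc_tx_stats().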
+static int
+nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *stats)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return nfp_repr_get_host_stats64(dev, stats);
+ }
+
+ return -EINVAL;
+}
+
+static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ unsigned int len = skb->len;
+ int ret;
+
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *)repr->dst);
+ skb_dst_set(skb, (struct dst_entry *)repr->dst);
+ skb->dev = repr->dst->u.port_info.lower_dev;
+
+ ret = dev_queue_xmit(skb);
+ nfp_repr_inc_tx_stats(netdev, len, ret);
+
+ return ret;
+}
+
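End to end, the representor TX path wired up here works as follows (a sketch assembled from the functions in this patch):

/*
 *  nfp_repr_xmit()                      representor ndo_start_xmit
 *    skb_dst_set(skb, repr->dst)        METADATA_HW_PORT_MUX dst
 *    dev_queue_xmit(skb)                skb->dev = PF vNIC (lower_dev)
 *      nfp_net_tx()                     vNIC ndo_start_xmit
 *        nfp_net_prep_port_id()         pushes 8B of port-id metadata
 *                                       firmware demuxes to the switch
 *                                       port in port_info.port_id
 */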
+static int nfp_repr_stop(struct net_device *netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ return nfp_app_repr_stop(repr->app, repr);
+}
+
+static int nfp_repr_open(struct net_device *netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ return nfp_app_repr_open(repr->app, repr);
+}
+
+const struct net_device_ops nfp_repr_netdev_ops = {
+ .ndo_open = nfp_repr_open,
+ .ndo_stop = nfp_repr_stop,
+ .ndo_start_xmit = nfp_repr_xmit,
+ .ndo_get_stats64 = nfp_repr_get_stats64,
+ .ndo_has_offload_stats = nfp_repr_has_offload_stats,
+ .ndo_get_offload_stats = nfp_repr_get_offload_stats,
+ .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
+ .ndo_setup_tc = nfp_port_setup_tc,
+};
+
+static void nfp_repr_clean(struct nfp_repr *repr)
+{
+ unregister_netdev(repr->netdev);
+ dst_release((struct dst_entry *)repr->dst);
+ nfp_port_free(repr->port);
+}
+
+static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
+static struct lock_class_key nfp_repr_netdev_addr_lock_key;
+
+static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
+}
+
+static void nfp_repr_set_lockdep_class(struct net_device *dev)
+{
+ lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
+ netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
+}
+
+int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
+ u32 cmsg_port_id, struct nfp_port *port,
+ struct net_device *pf_netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ int err;
+
+ nfp_repr_set_lockdep_class(netdev);
+
+ repr->port = port;
+ repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
+ if (!repr->dst)
+ return -ENOMEM;
+ repr->dst->u.port_info.port_id = cmsg_port_id;
+ repr->dst->u.port_info.lower_dev = pf_netdev;
+
+ netdev->netdev_ops = &nfp_repr_netdev_ops;
+ SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
+
+ if (nfp_app_has_tc(app)) {
+ netdev->features |= NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_HW_TC;
+ }
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_clean;
+
+ return 0;
+
+err_clean:
+ dst_release((struct dst_entry *)repr->dst);
+ return err;
+}
+
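Taken together, the helpers in this file suggest the following lifecycle for an app spawning representors (a usage sketch; the table is published via nfp_app_reprs_set(), see nfp_reprs_clean_and_free_by_type() below):

/*
 *  reprs  = nfp_reprs_alloc(n);                   table of n netdev slots
 *  netdev = nfp_repr_alloc(app);                  one netdev per port
 *  err    = nfp_repr_init(app, netdev, id, port, pf_netdev);
 *  ...
 *  nfp_reprs_clean_and_free_by_type(app, type);   RCU-safe teardown
 */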
+static void nfp_repr_free(struct nfp_repr *repr)
+{
+ free_percpu(repr->stats);
+ free_netdev(repr->netdev);
+}
+
+struct net_device *nfp_repr_alloc(struct nfp_app *app)
+{
+ struct net_device *netdev;
+ struct nfp_repr *repr;
+
+ netdev = alloc_etherdev(sizeof(*repr));
+ if (!netdev)
+ return NULL;
+
+ repr = netdev_priv(netdev);
+ repr->netdev = netdev;
+ repr->app = app;
+
+ repr->stats = netdev_alloc_pcpu_stats(struct nfp_repr_pcpu_stats);
+ if (!repr->stats)
+ goto err_free_netdev;
+
+ return netdev;
+
+err_free_netdev:
+ free_netdev(netdev);
+ return NULL;
+}
+
+static void nfp_repr_clean_and_free(struct nfp_repr *repr)
+{
+ nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
+ repr->netdev->name);
+ nfp_repr_clean(repr);
+ nfp_repr_free(repr);
+}
+
+void nfp_reprs_clean_and_free(struct nfp_reprs *reprs)
+{
+ unsigned int i;
+
+ for (i = 0; i < reprs->num_reprs; i++)
+ if (reprs->reprs[i])
+ nfp_repr_clean_and_free(netdev_priv(reprs->reprs[i]));
+
+ kfree(reprs);
+}
+
+void
+nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
+ enum nfp_repr_type type)
+{
+ struct nfp_reprs *reprs;
+
+ reprs = nfp_app_reprs_set(app, type, NULL);
+ if (!reprs)
+ return;
+
+ synchronize_rcu();
+ nfp_reprs_clean_and_free(reprs);
+}
+
+struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
+{
+ struct nfp_reprs *reprs;
+
+ reprs = kzalloc(sizeof(*reprs) +
+ num_reprs * sizeof(struct net_device *), GFP_KERNEL);
+ if (!reprs)
+ return NULL;
+ reprs->num_reprs = num_reprs;
+
+ return reprs;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
new file mode 100644
index 000000000000..32179cad062a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_NET_REPR_H
+#define NFP_NET_REPR_H
+
+struct metadata_dst;
+struct nfp_net;
+struct nfp_port;
+
+#include <net/dst_metadata.h>
+
+/**
+ * struct nfp_reprs - container for representor netdevs
+ * @num_reprs: Number of elements in reprs array
+ * @reprs: Array of representor netdevs
+ */
+struct nfp_reprs {
+ unsigned int num_reprs;
+ struct net_device *reprs[0];
+};
+
+/**
+ * struct nfp_repr_pcpu_stats
+ * @rx_packets: Received packets
+ * @rx_bytes: Received bytes
+ * @tx_packets: Transmitted packets
+ * @tx_bytes: Transmitted bytes
+ * @tx_drops: Packets dropped on transmit
+ * @syncp: Synchronization point for 64-bit stats
+ */
+struct nfp_repr_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+ struct u64_stats_sync syncp;
+};
+
+/**
+ * struct nfp_repr - priv data for representor netdevs
+ * @netdev: Back pointer to netdev
+ * @dst: Destination for packet TX
+ * @port: Port of representor
+ * @app: APP handle
+ * @stats: Statistics for packets hitting the CPU
+ */
+struct nfp_repr {
+ struct net_device *netdev;
+ struct metadata_dst *dst;
+ struct nfp_port *port;
+ struct nfp_app *app;
+ struct nfp_repr_pcpu_stats __percpu *stats;
+};
+
+/**
+ * enum nfp_repr_type - type of representor
+ * @NFP_REPR_TYPE_PHYS_PORT: external NIC port
+ * @NFP_REPR_TYPE_PF: physical function
+ * @NFP_REPR_TYPE_VF: virtual function
+ */
+enum nfp_repr_type {
+ NFP_REPR_TYPE_PHYS_PORT,
+ NFP_REPR_TYPE_PF,
+ NFP_REPR_TYPE_VF,
+
+ __NFP_REPR_TYPE_MAX,
+};
+#define NFP_REPR_TYPE_MAX (__NFP_REPR_TYPE_MAX - 1)
+
+extern const struct net_device_ops nfp_repr_netdev_ops;
+
+static inline bool nfp_netdev_is_nfp_repr(struct net_device *netdev)
+{
+ return netdev->netdev_ops == &nfp_repr_netdev_ops;
+}
+
+static inline int nfp_repr_get_port_id(struct net_device *netdev)
+{
+ struct nfp_repr *priv = netdev_priv(netdev);
+
+ return priv->dst->u.port_info.port_id;
+}
+
+void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
+int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
+ u32 cmsg_port_id, struct nfp_port *port,
+ struct net_device *pf_netdev);
+struct net_device *nfp_repr_alloc(struct nfp_app *app);
+void
+nfp_reprs_clean_and_free(struct nfp_reprs *reprs);
+void
+nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
+ enum nfp_repr_type type);
+struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
+
+#endif /* NFP_NET_REPR_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index a17410ac01ab..776e54dd5dd0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -32,7 +32,9 @@
*/
#include <linux/lockdep.h>
+#include <net/switchdev.h>
+#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
@@ -41,13 +43,64 @@
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev)
{
- struct nfp_net *nn;
+ if (nfp_netdev_is_nfp_net(netdev)) {
+ struct nfp_net *nn = netdev_priv(netdev);
- if (WARN_ON(!nfp_netdev_is_nfp_net(netdev)))
- return NULL;
- nn = netdev_priv(netdev);
+ return nn->port;
+ }
+
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ return repr->port;
+ }
+
+ WARN(1, "Unknown netdev type for nfp_port\n");
+
+ return NULL;
+}
+
+static int
+nfp_port_attr_get(struct net_device *netdev, struct switchdev_attr *attr)
+{
+ struct nfp_port *port;
+
+ port = nfp_port_from_netdev(netdev);
+ if (!port)
+ return -EOPNOTSUPP;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: {
+ const u8 *serial;
+ /* N.B: attr->u.ppid.id is binary data */
+ attr->u.ppid.id_len = nfp_cpp_serial(port->app->cpp, &serial);
+ memcpy(&attr->u.ppid.id, serial, attr->u.ppid.id_len);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
- return nn->port;
+const struct switchdev_ops nfp_port_switchdev_ops = {
+ .switchdev_port_attr_get = nfp_port_attr_get,
+};
+
+int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
+{
+ struct nfp_port *port;
+
+ if (chain_index)
+ return -EOPNOTSUPP;
+
+ port = nfp_port_from_netdev(netdev);
+ if (!port)
+ return -EOPNOTSUPP;
+
+ return nfp_app_setup_tc(port->app, netdev, handle, proto, tc);
}
struct nfp_port *
@@ -97,21 +150,61 @@ nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
int n;
port = nfp_port_from_netdev(netdev);
- eth_port = __nfp_port_get_eth_port(port);
- if (!eth_port)
+ if (!port)
+ return -EOPNOTSUPP;
+
+ switch (port->type) {
+ case NFP_PORT_PHYS_PORT:
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!eth_port->is_split)
+ n = snprintf(name, len, "p%d", eth_port->label_port);
+ else
+ n = snprintf(name, len, "p%ds%d", eth_port->label_port,
+ eth_port->label_subport);
+ break;
+ case NFP_PORT_PF_PORT:
+ n = snprintf(name, len, "pf%d", port->pf_id);
+ break;
+ case NFP_PORT_VF_PORT:
+ n = snprintf(name, len, "pf%dvf%d", port->pf_id, port->vf_id);
+ break;
+ default:
return -EOPNOTSUPP;
+ }
- if (!eth_port->is_split)
- n = snprintf(name, len, "p%d", eth_port->label_port);
- else
- n = snprintf(name, len, "p%ds%d", eth_port->label_port,
- eth_port->label_subport);
if (n >= len)
return -EINVAL;
return 0;
}
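Hedged examples of the names this now produces: "p0" for a plain physical port, "p0s1" for subport 1 of a split port, "pf0" for the PF logical port, and "pf0vf3" for VF 3 under PF 0.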
+int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
+ struct nfp_port *port, unsigned int id)
+{
+ port->eth_id = id;
+ port->eth_port = nfp_net_find_port(pf->eth_tbl, id);
+
+ /* Check if vNIC has external port associated and cfg is OK */
+ if (!port->eth_port) {
+ nfp_err(app->cpp,
+ "NSP port entries don't match vNICs (no entry for port #%d)\n",
+ id);
+ return -EINVAL;
+ }
+ if (port->eth_port->override_changed) {
+ nfp_warn(app->cpp,
+ "Config changed for port #%d, reboot required before port will be operational\n",
+ id);
+ port->type = NFP_PORT_INVALID;
+ return 0;
+ }
+
+ return 0;
+}
+
struct nfp_port *
nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type,
struct net_device *netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 4d1a9b3fed41..a33d22e18f94 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -36,6 +36,7 @@
#include <net/devlink.h>
+struct tc_to_netdev;
struct net_device;
struct nfp_app;
struct nfp_pf;
@@ -47,10 +48,14 @@ struct nfp_port;
* state when port disappears because of FW fault or config
* change
* @NFP_PORT_PHYS_PORT: external NIC port
+ * @NFP_PORT_PF_PORT: logical port of PCI PF
+ * @NFP_PORT_VF_PORT: logical port of PCI VF
*/
enum nfp_port_type {
NFP_PORT_INVALID,
NFP_PORT_PHYS_PORT,
+ NFP_PORT_PF_PORT,
+ NFP_PORT_VF_PORT,
};
/**
@@ -72,6 +77,8 @@ enum nfp_port_flags {
* @dl_port: devlink port structure
* @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
* @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
+ * @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
+ * @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
* @port_list: entry on pf's list of ports
*/
struct nfp_port {
@@ -84,12 +91,27 @@ struct nfp_port {
struct devlink_port dl_port;
- unsigned int eth_id;
- struct nfp_eth_table_port *eth_port;
+ union {
+ /* NFP_PORT_PHYS_PORT */
+ struct {
+ unsigned int eth_id;
+ struct nfp_eth_table_port *eth_port;
+ };
+ /* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
+ struct {
+ unsigned int pf_id;
+ unsigned int vf_id;
+ };
+ };
struct list_head port_list;
};
+extern const struct switchdev_ops nfp_port_switchdev_ops;
+
+int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc);
+
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
struct nfp_port *
nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
@@ -104,6 +126,9 @@ nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type,
struct net_device *netdev);
void nfp_port_free(struct nfp_port *port);
+int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
+ struct nfp_port *port, unsigned int id);
+
int nfp_net_refresh_eth_port(struct nfp_port *port);
void nfp_net_refresh_port_table(struct nfp_port *port);
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
@@ -111,4 +136,64 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
void nfp_devlink_port_unregister(struct nfp_port *port);
+/**
+ * MAC stats (0x0000 - 0x0200)
+ * All counters are 64 bit.
+ */
+#define NFP_MAC_STATS_BASE 0x0000
+#define NFP_MAC_STATS_SIZE 0x0200
+
+#define NFP_MAC_STATS_RX_IN_OCTETS (NFP_MAC_STATS_BASE + 0x000)
+#define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS (NFP_MAC_STATS_BASE + 0x010)
+#define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS (NFP_MAC_STATS_BASE + 0x018)
+#define NFP_MAC_STATS_RX_VLAN_RECEIVED_OK (NFP_MAC_STATS_BASE + 0x020)
+#define NFP_MAC_STATS_RX_IN_ERRORS (NFP_MAC_STATS_BASE + 0x028)
+#define NFP_MAC_STATS_RX_IN_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x030)
+#define NFP_MAC_STATS_RX_STATS_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038)
+#define NFP_MAC_STATS_RX_ALIGNMENT_ERRORS (NFP_MAC_STATS_BASE + 0x040)
+#define NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES (NFP_MAC_STATS_BASE + 0x048)
+#define NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK (NFP_MAC_STATS_BASE + 0x050)
+#define NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS (NFP_MAC_STATS_BASE + 0x058)
+#define NFP_MAC_STATS_RX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x060)
+#define NFP_MAC_STATS_RX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x068)
+#define NFP_MAC_STATS_RX_STATS_PKTS (NFP_MAC_STATS_BASE + 0x070)
+#define NFP_MAC_STATS_RX_STATS_UNDERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x078)
+#define NFP_MAC_STATS_RX_STATS_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x080)
+#define NFP_MAC_STATS_RX_STATS_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x088)
+#define NFP_MAC_STATS_RX_STATS_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x090)
+#define NFP_MAC_STATS_RX_STATS_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x098)
+#define NFP_MAC_STATS_RX_STATS_JABBERS (NFP_MAC_STATS_BASE + 0x0a0)
+#define NFP_MAC_STATS_RX_STATS_FRAGMENTS (NFP_MAC_STATS_BASE + 0x0a8)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2 (NFP_MAC_STATS_BASE + 0x0b0)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3 (NFP_MAC_STATS_BASE + 0x0b8)
+#define NFP_MAC_STATS_RX_STATS_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x0c0)
+#define NFP_MAC_STATS_RX_STATS_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x0c8)
+#define NFP_MAC_STATS_RX_STATS_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x0d0)
+#define NFP_MAC_STATS_RX_OVERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x0d8)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0 (NFP_MAC_STATS_BASE + 0x0e0)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1 (NFP_MAC_STATS_BASE + 0x0e8)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4 (NFP_MAC_STATS_BASE + 0x0f0)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5 (NFP_MAC_STATS_BASE + 0x0f8)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6 (NFP_MAC_STATS_BASE + 0x100)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7 (NFP_MAC_STATS_BASE + 0x108)
+#define NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED (NFP_MAC_STATS_BASE + 0x110)
+#define NFP_MAC_STATS_RX_MAC_HEAD_DROP (NFP_MAC_STATS_BASE + 0x118)
+
+#define NFP_MAC_STATS_TX_QUEUE_DROP (NFP_MAC_STATS_BASE + 0x138)
+#define NFP_MAC_STATS_TX_OUT_OCTETS (NFP_MAC_STATS_BASE + 0x140)
+#define NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK (NFP_MAC_STATS_BASE + 0x150)
+#define NFP_MAC_STATS_TX_OUT_ERRORS (NFP_MAC_STATS_BASE + 0x158)
+#define NFP_MAC_STATS_TX_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x160)
+#define NFP_MAC_STATS_TX_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x168)
+#define NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x170)
+#define NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x178)
+#define NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES (NFP_MAC_STATS_BASE + 0x180)
+#define NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK (NFP_MAC_STATS_BASE + 0x188)
+#define NFP_MAC_STATS_TX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x190)
+#define NFP_MAC_STATS_TX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x198)
+#define NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x1a0)
+#define NFP_MAC_STATS_TX_PKTS_127_TO_512_OCTETS (NFP_MAC_STATS_BASE + 0x1a8)
+#define NFP_MAC_STATS_TX_PKTS_128_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x1b0)
+#define NFP_MAC_STATS_TX_PKTS_1518_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x1b8)
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 25a967158ce9..5798adc57cbc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -230,6 +230,9 @@ struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address,
unsigned long size);
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 cpp_id,
+ unsigned long long address, unsigned long size);
void nfp_cpp_area_free(struct nfp_cpp_area *area);
int nfp_cpp_area_acquire(struct nfp_cpp_area *area);
int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area);
@@ -239,8 +242,6 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
void *buffer, size_t length);
int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
const void *buffer, size_t length);
-int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
- unsigned long long offset, unsigned long size);
const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
@@ -278,6 +279,10 @@ int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 value);
+u8 __iomem *
+nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target,
+ u64 addr, unsigned long size, struct nfp_cpp_area **area);
+
struct nfp_cpp_mutex;
int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 9b69dcf87be9..04dd5758ecf5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -361,6 +361,41 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
}
/**
+ * nfp_cpp_area_alloc_acquire() - allocate a new CPP area and lock it down
+ * @cpp: CPP handle
+ * @name: Name of region
+ * @dest: CPP id
+ * @address: Start address on CPP target
+ * @size: Size of area
+ *
+ * Allocate and initialize a CPP area structure, and lock it down so
+ * that it can be accessed directly.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * NOTE: The area must also be 'released' when the structure is freed.
+ *
+ * Return: NFP CPP Area handle, or NULL
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 dest,
+ unsigned long long address, unsigned long size)
+{
+ struct nfp_cpp_area *area;
+
+ area = nfp_cpp_area_alloc_with_name(cpp, dest, name, address, size);
+ if (!area)
+ return NULL;
+
+ if (nfp_cpp_area_acquire(area)) {
+ nfp_cpp_area_free(area);
+ return NULL;
+ }
+
+ return area;
+}
+
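A minimal usage sketch for the new helper (hypothetical caller): every successful nfp_cpp_area_alloc_acquire() must be paired with nfp_cpp_area_release_free():

static int nfp_peek32_sketch(struct nfp_cpp *cpp, u32 dest, u64 addr, u32 *val)
{
        struct nfp_cpp_area *area;
        void __iomem *io;

        area = nfp_cpp_area_alloc_acquire(cpp, "peek", dest, addr, 4);
        if (!area)
                return -EIO;

        io = nfp_cpp_area_iomem(area);
        if (io)
                *val = readl(io);

        nfp_cpp_area_release_free(area);
        return io ? 0 : -EIO;
}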
+/**
* nfp_cpp_area_free() - free up the CPP area
* @area: CPP area handle
*
@@ -536,27 +571,6 @@ int nfp_cpp_area_write(struct nfp_cpp_area *area,
}
/**
- * nfp_cpp_area_check_range() - check if address range fits in CPP area
- * @area: CPP area handle
- * @offset: offset into CPP target
- * @length: size of address range in bytes
- *
- * Check if address range fits within CPP area. Return 0 if area
- * fits or -EFAULT on error.
- *
- * Return: 0, or -ERRNO
- */
-int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
- unsigned long long offset, unsigned long length)
-{
- if (offset < area->offset ||
- offset + length > area->offset + area->size)
- return -EFAULT;
-
- return 0;
-}
-
-/**
* nfp_cpp_area_name() - return name of a CPP area
* @cpp_area: CPP area handle
*
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 0ba0379b8f75..ab86bceb93f2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -279,3 +279,43 @@ exit_release:
return err;
}
+
+/**
+ * nfp_cpp_map_area() - Helper function to map an area
+ * @cpp: NFP CPP handler
+ * @name: Name for the area
+ * @domain: CPP domain
+ * @target: CPP target
+ * @addr: CPP address
+ * @size: Size of the area
+ * @area: Area handle (output)
+ *
+ * Map an area for IOMEM access. To undo the effect of this function, call
+ * @nfp_cpp_area_release_free(*area).
+ *
+ * Return: Pointer to memory mapped area or ERR_PTR
+ */
+u8 __iomem *
+nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target,
+ u64 addr, unsigned long size, struct nfp_cpp_area **area)
+{
+ u8 __iomem *res;
+ u32 dest;
+
+ dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain);
+
+ *area = nfp_cpp_area_alloc_acquire(cpp, name, dest, addr, size);
+ if (!*area)
+ goto err_eio;
+
+ res = nfp_cpp_area_iomem(*area);
+ if (!res)
+ goto err_release_free;
+
+ return res;
+
+err_release_free:
+ nfp_cpp_area_release_free(*area);
+err_eio:
+ return (u8 __iomem *)ERR_PTR(-EIO);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
index d27d29782a12..c9724fb7ea4b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
@@ -97,7 +97,11 @@ int nfp_rtsym_count(struct nfp_rtsym_table *rtbl);
const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx);
const struct nfp_rtsym *
nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
+
u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
int *error);
+u8 __iomem *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
+ unsigned int min_size, struct nfp_cpp_area **area);
#endif /* NFP_NFFW_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 26d7dcea4fd9..e2f028027c6f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -76,6 +76,7 @@ enum nfp_eth_aneg {
/**
* struct nfp_eth_table - ETH table information
* @count: number of table entries
+ * @max_index: max of @index fields of all @ports
* @ports: table of ports
*
* @eth_index: port index according to legacy ethX numbering
@@ -101,6 +102,7 @@ enum nfp_eth_aneg {
*/
struct nfp_eth_table {
unsigned int count;
+ unsigned int max_index;
struct nfp_eth_table_port {
unsigned int eth_index;
unsigned int index;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index b0f8785c064f..c2bc36e8649f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -190,7 +190,9 @@ nfp_eth_calc_port_geometry(struct nfp_cpp *cpp, struct nfp_eth_table *table)
{
unsigned int i, j;
- for (i = 0; i < table->count; i++)
+ for (i = 0; i < table->count; i++) {
+ table->max_index = max(table->max_index, table->ports[i].index);
+
for (j = 0; j < table->count; j++) {
if (table->ports[i].label_port !=
table->ports[j].label_port)
@@ -208,6 +210,7 @@ nfp_eth_calc_port_geometry(struct nfp_cpp *cpp, struct nfp_eth_table *table)
table->ports[i].is_split = true;
}
+ }
}
static void
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index 203f9cbae0fb..ecda474ac7c3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -289,3 +289,30 @@ exit:
return ~0ULL;
return val;
}
+
+u8 __iomem *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
+ unsigned int min_size, struct nfp_cpp_area **area)
+{
+ const struct nfp_rtsym *sym;
+ u8 __iomem *mem;
+
+ sym = nfp_rtsym_lookup(rtbl, name);
+ if (!sym)
+ return (u8 __iomem *)ERR_PTR(-ENOENT);
+
+ if (sym->size < min_size) {
+ nfp_err(rtbl->cpp, "Symbol %s too small\n", name);
+ return (u8 __iomem *)ERR_PTR(-EINVAL);
+ }
+
+ mem = nfp_cpp_map_area(rtbl->cpp, id, sym->domain, sym->target,
+ sym->addr, sym->size, area);
+ if (IS_ERR(mem)) {
+ nfp_err(rtbl->cpp, "Failed to map symbol %s: %ld\n",
+ name, PTR_ERR(mem));
+ return mem;
+ }
+
+ return mem;
+}
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 828bfd93cb54..08381ef8bdb4 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -919,7 +919,6 @@ static int __lpc_handle_recv(struct net_device *ndev, int budget)
struct sk_buff *skb;
u32 rxconsidx, len, ethst;
struct rx_status_t *prxstat;
- u8 *prdbuf;
int rx_done = 0;
/* Get the current RX buffer indexes */
@@ -960,9 +959,9 @@ static int __lpc_handle_recv(struct net_device *ndev, int budget)
ndev->stats.rx_dropped++;
} else {
/* Copy packet from buffer */
- prdbuf = skb_put_data(skb,
- pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE,
- len);
+ skb_put_data(skb,
+ pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE,
+ len);
/* Pass to upper layer */
skb->protocol = eth_type_trans(skb, ndev);
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 8b026dbf0d8d..482b85e4d665 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1495,8 +1495,8 @@ static int hamachi_rx(struct net_device *dev)
hmp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
- + entry*sizeof(*desc), pkt_len);
+ skb_put_data(skb, hmp->rx_ring_dma
+ + entry*sizeof(*desc), pkt_len);
#endif
pci_dma_sync_single_for_device(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[entry].addr),
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 974929dcc74e..67452380b60e 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,6 +5,6 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_QED_RDMA) += qed_roce.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o
qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index e2a62c091b80..eaca4578435d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -44,6 +44,7 @@
#include "qed_hsi.h"
#include "qed_sp.h"
#include "qed_sriov.h"
+#include "qed_rdma.h"
#ifdef CONFIG_DCB
#include <linux/qed/qed_eth_if.h>
#endif
@@ -892,10 +893,33 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
/* update storm FW with negotiation results */
qed_sp_pf_update(p_hwfn);
+
+ /* for roce PFs, we may want to enable/disable DPM
+ * when DCBx change occurs
+ */
+ if (p_hwfn->hw_info.personality ==
+ QED_PCI_ETH_ROCE)
+ qed_roce_dpm_dcbx(p_hwfn, p_ptt);
}
}
qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
+
+ if (type == QED_DCBX_OPERATIONAL_MIB) {
+ struct qed_dcbx_results *p_data;
+ u16 val;
+
+ /* Configure in NIG which protocols support EDPM and should
+ * honor PFC.
+ */
+ p_data = &p_hwfn->p_dcbx_info->results;
+ val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) |
+ (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc);
+ val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT;
+ val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val);
+ }
+
qed_dcbx_aen(p_hwfn, type);
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 65fe4940f20d..49667ad9042d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -62,7 +62,7 @@
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
-#include "qed_roce.h"
+#include "qed_rdma.h"
static DEFINE_SPINLOCK(qm_lock);
@@ -3075,12 +3075,15 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
}
pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- dma_free_coherent(&cdev->pdev->dev,
- pbl_size,
- p_chain->pbl_sp.p_virt_table,
- p_chain->pbl_sp.p_phys_table);
+
+ if (!p_chain->b_external_pbl)
+ dma_free_coherent(&cdev->pdev->dev,
+ pbl_size,
+ p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table);
out:
vfree(p_chain->pbl.pp_virt_addr_tbl);
+ p_chain->pbl.pp_virt_addr_tbl = NULL;
}
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
@@ -3174,7 +3177,10 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
return 0;
}
-static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+static int
+qed_chain_alloc_pbl(struct qed_dev *cdev,
+ struct qed_chain *p_chain,
+ struct qed_chain_ext_pbl *ext_pbl)
{
u32 page_cnt = p_chain->page_cnt, size, i;
dma_addr_t p_phys = 0, p_pbl_phys = 0;
@@ -3194,8 +3200,16 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
* should be saved to allow its freeing during the error flow.
*/
size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
- size, &p_pbl_phys, GFP_KERNEL);
+
+ if (!ext_pbl) {
+ p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_pbl_phys, GFP_KERNEL);
+ } else {
+ p_pbl_virt = ext_pbl->p_pbl_virt;
+ p_pbl_phys = ext_pbl->p_pbl_phys;
+ p_chain->b_external_pbl = true;
+ }
+
qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
if (!p_pbl_virt)
@@ -3228,7 +3242,10 @@ int qed_chain_alloc(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
enum qed_chain_cnt_type cnt_type,
- u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
+ u32 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain,
+ struct qed_chain_ext_pbl *ext_pbl)
{
u32 page_cnt;
int rc = 0;
@@ -3259,7 +3276,7 @@ int qed_chain_alloc(struct qed_dev *cdev,
rc = qed_chain_alloc_single(cdev, p_chain);
break;
case QED_CHAIN_MODE_PBL:
- rc = qed_chain_alloc_pbl(cdev, p_chain);
+ rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
break;
}
if (rc)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 12d16c096e36..1f1df1bf127c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -307,6 +307,7 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
* @param num_elems
* @param elem_size
* @param p_chain
+ * @param ext_pbl - a possible external PBL
*
* @return int
*/
@@ -315,7 +316,9 @@ qed_chain_alloc(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
enum qed_chain_cnt_type cnt_type,
- u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
+ u32 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl);
/**
* @brief qed_chain_free - Free chain DMA memory
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 6103723d7118..813c77cc857f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -62,6 +62,22 @@
#include "qed_sriov.h"
#include "qed_reg_addr.h"
+static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+ if (p_hwfn->p_iscsi_info->event_cb) {
+ struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+ return p_iscsi->event_cb(p_iscsi->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+ return -EINVAL;
+ }
+}
+
struct qed_iscsi_conn {
struct list_head list_entry;
bool free_on_delete;
@@ -265,6 +281,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
+ qed_iscsi_async_event);
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -631,7 +650,10 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.iscsi_destroy;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
- return qed_spq_post(p_hwfn, p_ent, NULL);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
+ return rc;
}
static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
@@ -752,7 +774,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- r2tq_num_elements, 0x80, &p_conn->r2tq);
+ r2tq_num_elements, 0x80, &p_conn->r2tq, NULL);
if (rc)
goto nomem_r2tq;
@@ -763,7 +785,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
uhq_num_elements,
- sizeof(struct iscsi_uhqe), &p_conn->uhq);
+ sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL);
if (rc)
goto nomem_uhq;
@@ -773,7 +795,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
xhq_num_elements,
- sizeof(struct iscsi_xhqe), &p_conn->xhq);
+ sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL);
if (rc)
goto nomem;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 0e26193156e4..17f9b0a7b553 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -61,7 +61,7 @@
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
-#include "qed_roce.h"
+#include "qed_rdma.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
@@ -1056,7 +1056,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
QED_CHAIN_CNT_TYPE_U16,
p_ll2_info->input.rx_num_desc,
sizeof(struct core_rx_bd),
- &p_ll2_info->rx_queue.rxq_chain);
+ &p_ll2_info->rx_queue.rxq_chain, NULL);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
goto out;
@@ -1078,7 +1078,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
QED_CHAIN_CNT_TYPE_U16,
p_ll2_info->input.rx_num_desc,
sizeof(struct core_rx_fast_path_cqe),
- &p_ll2_info->rx_queue.rcq_chain);
+ &p_ll2_info->rx_queue.rcq_chain, NULL);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
goto out;
@@ -1108,7 +1108,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
QED_CHAIN_CNT_TYPE_U16,
p_ll2_info->input.tx_num_desc,
sizeof(struct core_tx_bd),
- &p_ll2_info->tx_queue.txq_chain);
+ &p_ll2_info->tx_queue.txq_chain, NULL);
if (rc)
goto out;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
new file mode 100644
index 000000000000..df76e212f86e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -0,0 +1,1722 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include <linux/qed/qed_rdma_if.h>
+#include "qed_rdma.h"
+#include "qed_roce.h"
+#include "qed_sp.h"
+
+
+int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count, char *name)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
+
+ bmap->max_count = max_count;
+
+ bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
+ GFP_KERNEL);
+ if (!bmap->bitmap)
+ return -ENOMEM;
+
+ snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+ return 0;
+}
+
+int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num)
+{
+ *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
+ if (*id_num >= bmap->max_count)
+ return -EINVAL;
+
+ __set_bit(*id_num, bmap->bitmap);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
+ bmap->name, *id_num);
+
+ return 0;
+}
+
+void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return;
+
+ __set_bit(id_num, bmap->bitmap);
+}
+
+void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ bool b_acquired;
+
+ if (id_num >= bmap->max_count)
+ return;
+
+ b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
+ if (!b_acquired) {
+ DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
+ bmap->name, id_num);
+ return;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
+ bmap->name, id_num);
+}
+
+int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return -1;
+
+ return test_bit(id_num, bmap->bitmap);
+}
+
+static bool qed_bmap_is_empty(struct qed_bmap *bmap)
+{
+ return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
+}
+
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+{
+ /* First sb id for RoCE is after all the l2 sb */
+ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_info *p_rdma_info;
+ u32 num_cons, num_tasks;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
+ /* Allocate a struct with current pf rdma info */
+ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
+ if (!p_rdma_info)
+ return rc;
+
+ p_hwfn->p_rdma_info = p_rdma_info;
+ p_rdma_info->proto = PROTOCOLID_ROCE;
+
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
+ NULL);
+
+ p_rdma_info->num_qps = num_cons / 2;
+
+ num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
+
+ /* Each MR uses a single task */
+ p_rdma_info->num_mrs = num_tasks;
+
+ /* Queue zone lines are shared between RoCE and L2 in such a way that
+ * they can be used by each without obstructing the other.
+ */
+ p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+ /* Allocate a struct with device params and fill it */
+ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
+ if (!p_rdma_info->dev)
+ goto free_rdma_info;
+
+ /* Allocate a struct with port params and fill it */
+ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
+ if (!p_rdma_info->port)
+ goto free_rdma_dev;
+
+ /* Allocate bit map for pd's */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
+ "PD");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate pd_map, rc = %d\n",
+ rc);
+ goto free_rdma_port;
+ }
+
+ /* Allocate DPI bitmap */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
+ p_hwfn->dpi_count, "DPI");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate DPI bitmap, rc = %d\n", rc);
+ goto free_pd_map;
+ }
+
+ /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
+ * twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
+ p_rdma_info->num_qps * 2, "CQ");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cq bitmap, rc = %d\n", rc);
+ goto free_dpi_map;
+ }
+
+ /* Allocate bitmap for toggle bit for cq icids
+ * We toggle the bit every time we create or resize cq for a given icid.
+ * The maximum number of CQs is bounded to twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
+ p_rdma_info->num_qps * 2, "Toggle");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate toogle bits, rc = %d\n", rc);
+ goto free_cq_map;
+ }
+
+ /* Allocate bitmap for itids */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
+ p_rdma_info->num_mrs, "MR");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate itids bitmaps, rc = %d\n", rc);
+ goto free_toggle_map;
+ }
+
+ /* Allocate bitmap for cids used for qps. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
+ "CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cid bitmap, rc = %d\n", rc);
+ goto free_tid_map;
+ }
+
+ /* Allocate bitmap for cids used for responders/requesters. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
+ "REAL_CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate real cid bitmap, rc = %d\n", rc);
+ goto free_cid_map;
+ }
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
+ return 0;
+
+free_cid_map:
+ kfree(p_rdma_info->cid_map.bitmap);
+free_tid_map:
+ kfree(p_rdma_info->tid_map.bitmap);
+free_toggle_map:
+ kfree(p_rdma_info->toggle_bits.bitmap);
+free_cq_map:
+ kfree(p_rdma_info->cq_map.bitmap);
+free_dpi_map:
+ kfree(p_rdma_info->dpi_map.bitmap);
+free_pd_map:
+ kfree(p_rdma_info->pd_map.bitmap);
+free_rdma_port:
+ kfree(p_rdma_info->port);
+free_rdma_dev:
+ kfree(p_rdma_info->dev);
+free_rdma_info:
+ kfree(p_rdma_info);
+
+ return rc;
+}
+
+void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, bool check)
+{
+ int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ int last_line = bmap->max_count / (64 * 8);
+ int last_item = last_line * 8 +
+ DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
+ u64 *pmap = (u64 *)bmap->bitmap;
+ int line, item, offset;
+ u8 str_last_line[200] = { 0 };
+
+ if (!weight || !check)
+ goto end;
+
+ DP_NOTICE(p_hwfn,
+ "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
+ bmap->name, bmap->max_count, weight);
+
+ /* print aligned non-zero lines, if any */
+ for (item = 0, line = 0; line < last_line; line++, item += 8)
+ if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+ DP_NOTICE(p_hwfn,
+ "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ line,
+ pmap[item],
+ pmap[item + 1],
+ pmap[item + 2],
+ pmap[item + 3],
+ pmap[item + 4],
+ pmap[item + 5],
+ pmap[item + 6], pmap[item + 7]);
+
+ /* print last unaligned non-zero line, if any */
+ if ((bmap->max_count % (64 * 8)) &&
+ (bitmap_weight((unsigned long *)&pmap[item],
+ bmap->max_count - item * 64))) {
+ offset = sprintf(str_last_line, "line 0x%04x: ", line);
+ for (; item < last_item; item++)
+ offset += sprintf(str_last_line + offset,
+ "0x%016llx ", pmap[item]);
+ DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+ }
+
+end:
+ kfree(bmap->bitmap);
+ bmap->bitmap = NULL;
+}
+
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
+
+ kfree(p_rdma_info->port);
+ kfree(p_rdma_info->dev);
+
+ kfree(p_rdma_info);
+}
+
+static void qed_rdma_free(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+
+ qed_rdma_resc_free(p_hwfn);
+}
+
+static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
+{
+ guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
+ guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
+ guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
+ guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
+ guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
+}
+
+static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_events *events;
+
+ events = &p_hwfn->p_rdma_info->events;
+
+ events->unaffiliated_event = params->events->unaffiliated_event;
+ events->affiliated_event = params->events->affiliated_event;
+ events->context = params->events->context;
+}
+
+static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 pci_status_control;
+ u32 num_qps;
+
+ /* Vendor specific information */
+ dev->vendor_id = cdev->vendor_id;
+ dev->vendor_part_id = cdev->device_id;
+ dev->hw_ver = 0;
+ dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+ qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
+ dev->node_guid = dev->sys_image_guid;
+
+ dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
+ RDMA_MAX_SGE_PER_RQ_WQE);
+
+ if (cdev->rdma_max_sge)
+ dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+
+ dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+
+ dev->max_inline = (cdev->rdma_max_inline) ?
+ min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
+ dev->max_inline;
+
+ dev->max_wqe = QED_RDMA_MAX_WQE;
+ dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
+
+ /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
+ * it is up-aligned to 16 and then to ILT page size within qed cxt.
+ * This is OK in terms of ILT but we don't want to configure the FW
+ * above its abilities
+ */
+ num_qps = ROCE_MAX_QPS;
+ num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
+ dev->max_qp = num_qps;
+
+ /* CQs uses the same icids that QPs use hence they are limited by the
+ * number of icids. There are two icids per QP.
+ */
+ dev->max_cq = num_qps * 2;
+
+ /* The number of mrs is smaller by 1 since the first is reserved */
+ dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
+ dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
+
+ /* The maximum CQE capacity per CQ supported.
+ * max number of cqes will be in two layer pbl,
+ * 8 is the pointer size in bytes
+ * 32 is the size of cq element in bytes
+ */
+ if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
+ dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
+ else
+ dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
+
+ dev->max_mw = 0;
+ dev->max_fmr = QED_RDMA_MAX_FMR;
+ dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
+ dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
+ dev->max_pkey = QED_RDMA_MAX_P_KEY;
+
+ dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
+ dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ RDMA_REQ_RD_ATOMIC_ELM_SIZE;
+ dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
+ p_hwfn->p_rdma_info->num_qps;
+ dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
+ dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
+ dev->max_pd = RDMA_MAX_PDS;
+ dev->max_ah = p_hwfn->p_rdma_info->num_qps;
+ dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
+
+ /* Set capablities */
+ dev->dev_caps = 0;
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
+
+ /* Check atomic operations support in PCI configuration space. */
+ pci_read_config_dword(cdev->pdev,
+ cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
+ &pci_status_control);
+
+ if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
+}
+
+static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ port->max_msg_size = min_t(u64,
+ (dev->max_mr_mw_fmr_size *
+ p_hwfn->cdev->rdma_max_sge),
+ BIT(31));
+
+ port->pkey_bad_counter = 0;
+}
+
+static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ll2_ethertype_en;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
+
+ /* We delay writing to this reg until first cid is allocated. See
+ * qed_cxt_dynamic_ilt_alloc function for more details
+ */
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en | 0x01));
+
+ if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
+ DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
+ return 0;
+}
+
+static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ struct qed_ptt *p_ptt)
+{
+ struct rdma_init_func_ramrod_data *p_ramrod;
+ struct qed_rdma_cnq_params *p_cnq_pbl_list;
+ struct rdma_init_func_hdr *p_params_header;
+ struct rdma_cnq_params *p_cnq_params;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 cnq_id, sb_id;
+ u16 igu_sb_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
+
+ /* Save the number of cnqs for the function close ramrod */
+ p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+
+ p_params_header = &p_ramrod->params_header;
+ p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
+ QED_RDMA_CNQ_RAM);
+ p_params_header->num_cnqs = params->desired_cnq;
+
+ if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
+ p_params_header->cq_ring_mode = 1;
+ else
+ p_params_header->cq_ring_mode = 0;
+
+ for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
+ sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
+ igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+ p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
+ p_cnq_params = &p_ramrod->cnq_params[cnq_id];
+ p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
+
+ p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
+ p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
+
+ DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
+ p_cnq_pbl_list->pbl_ptr);
+
+ /* we assume here that cnq_id and qz_offset are the same */
+ p_cnq_params->queue_zone_num =
+ cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
+ cnq_id);
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ /* The first DPI is reserved for the Kernel */
+ __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
+
+ /* Tid 0 will be used as the key for "reserved MR".
+ * The driver should allocate memory for it so it can be loaded but no
+ * ramrod should be passed on it.
+ */
+ qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
+ if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
+ DP_NOTICE(p_hwfn,
+ "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
+
+ spin_lock_init(&p_hwfn->p_rdma_info->lock);
+
+ qed_rdma_init_devinfo(p_hwfn, params);
+ qed_rdma_init_port(p_hwfn);
+ qed_rdma_init_events(p_hwfn, params);
+
+ rc = qed_rdma_reserve_lkey(p_hwfn);
+ if (rc)
+ return rc;
+
+ rc = qed_rdma_init_hw(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ qed_roce_setup(p_hwfn);
+
+ return qed_rdma_start_fw(p_hwfn, params, p_ptt);
+}
+
+int qed_rdma_stop(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_close_func_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u32 ll2_ethertype_en;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ /* Disable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en & 0xFFFE));
+
+ qed_roce_stop(p_hwfn);
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Stop RoCE */
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto out;
+
+ p_ramrod = &p_ent->ramrod.rdma_close_func;
+
+ p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
+ p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+out:
+ qed_rdma_free(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 dpi_start_offset;
+ u32 returned_id = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
+
+ /* Allocate DPI */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ out_params->dpi = (u16)returned_id;
+
+ /* Calculate the corresponding DPI address */
+ dpi_start_offset = p_hwfn->dpi_start_offset;
+
+ out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size));
+
+ out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size);
+
+ out_params->dpi_size = p_hwfn->dpi_size;
+ out_params->wid_count = p_hwfn->wid_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
+ return rc;
+}
+
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
+
+ /* Link may have changed */
+ p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+
+ p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
+
+ return p_port;
+}
+
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
+
+ /* Return struct with device parameters */
+ return p_hwfn->p_rdma_info->dev;
+}
+
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+{
+ struct qed_hwfn *p_hwfn;
+ u16 qz_num;
+ u32 addr;
+
+ p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+ DP_NOTICE(p_hwfn,
+ "queue zone offset %d is too large (max is %d)\n",
+ qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+ return;
+ }
+
+ qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ wmb();
+}
+
+static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+
+ memset(info, 0, sizeof(*info));
+
+ info->rdma_type = QED_RDMA_TYPE_ROCE;
+ info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static int qed_rdma_get_sb_start(struct qed_dev *cdev)
+{
+ int feat_num;
+
+ if (cdev->num_hwfns > 1)
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
+ else
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
+ cdev->num_hwfns;
+
+ return feat_num;
+}
+
+static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
+{
+ int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
+ int n_msix = cdev->int_params.rdma_msix_cnt;
+
+ return min_t(int, n_cnq, n_msix);
+}
+
+static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
+ DP_ERR(cdev,
+ "qed roce supports only MSI-X interrupts (detected %d).\n",
+ cdev->int_params.out.int_mode);
+ return -EINVAL;
+ } else if (cdev->int_params.fp_msix_cnt) {
+ limit = cdev->int_params.rdma_msix_cnt;
+ }
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.rdma_msix_base;
+
+ info->msix_cnt = cdev->int_params.rdma_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
+ info->msix_cnt, msix_base);
+ }
+
+ return 0;
+}
+
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
+
+ /* Allocates an unused protection domain */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->pd_map, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ *pd = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
+ return rc;
+}
+
+static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
+
+ /* Returns a previously allocated protection domain for reuse */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static enum qed_rdma_toggle_bit
+qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ enum qed_rdma_toggle_bit toggle_bit;
+ u32 bmap_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
+
+ /* the function toggle the bit that is related to a given icid
+ * and returns the new toggle bit's value
+ */
+ bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
+
+ spin_lock_bh(&p_info->lock);
+ toggle_bit = !test_and_change_bit(bmap_id,
+ p_info->toggle_bits.bitmap);
+ spin_unlock_bh(&p_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
+ toggle_bit);
+
+ return toggle_bit;
+}
+
+static int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ struct rdma_create_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 returned_id, start_cid;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
+ params->cq_handle_hi, params->cq_handle_lo);
+
+ /* Allocate icid */
+ spin_lock_bh(&p_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
+ spin_unlock_bh(&p_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
+ return rc;
+ }
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_info->proto);
+ *icid = returned_id + start_cid;
+
+ /* Check if icid requires a page allocation */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
+ if (rc)
+ goto err;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = *icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send create CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_CQ,
+ p_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_cq;
+
+ p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
+ p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
+ p_ramrod->dpi = cpu_to_le16(params->dpi);
+ p_ramrod->is_two_level_pbl = params->pbl_two_level;
+ p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
+ p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
+ p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
+ params->cnq_id;
+ p_ramrod->int_timeout = params->int_timeout;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+
+ p_ramrod->toggle_bit = toggle_bit;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc) {
+ /* restore toggle bit */
+ qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+ goto err;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ /* release allocated icid */
+ spin_lock_bh(&p_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
+ spin_unlock_bh(&p_info->lock);
+ DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
+
+ return rc;
+}
+
+static int
+qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_destroy_cq_output_params *p_ramrod_res;
+ struct rdma_destroy_cq_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ enum protocol_type proto;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ (struct rdma_destroy_cq_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ proto = p_hwfn->p_rdma_info->proto;
+ /* Send destroy CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_CQ,
+ proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ /* Free icid */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn,
+ &p_hwfn->p_rdma_info->cq_map,
+ (in_params->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto)));
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
+ return rc;
+
+err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
+void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
+{
+ p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
+ p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
+ p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+}
+
+static int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* The following fields are filled in from qp and not FW as they can't
+ * be modified by FW
+ */
+ out_params->mtu = qp->mtu;
+ out_params->dest_qp = qp->dest_qp;
+ out_params->incoming_atomic_en = qp->incoming_atomic_en;
+ out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
+ out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
+ out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
+ out_params->dgid = qp->dgid;
+ out_params->flow_label = qp->flow_label;
+ out_params->hop_limit_ttl = qp->hop_limit_ttl;
+ out_params->traffic_class_tos = qp->traffic_class_tos;
+ out_params->timeout = qp->ack_timeout;
+ out_params->rnr_retry = qp->rnr_retry_cnt;
+ out_params->retry_cnt = qp->retry_cnt;
+ out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
+ out_params->pkey_index = 0;
+ out_params->max_rd_atomic = qp->max_rd_atomic_req;
+ out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
+ out_params->sqd_async = qp->sqd_async;
+
+ rc = qed_roce_query_qp(p_hwfn, qp, out_params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ rc = qed_roce_destroy_qp(p_hwfn, qp);
+
+ /* free qp params struct */
+ kfree(qp);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
+ return rc;
+}
+
+static struct qed_rdma_qp *
+qed_rdma_create_qp(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *in_params,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_qp *qp;
+ u8 max_stats_queues;
+ int rc;
+
+ if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+ DP_ERR(p_hwfn->cdev,
+ "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ rdma_cxt, in_params, out_params);
+ return NULL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "qed rdma create qp called with qp_handle = %08x%08x\n",
+ in_params->qp_handle_hi, in_params->qp_handle_lo);
+
+ /* Some sanity checks... */
+ max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
+ if (in_params->stats_queue >= max_stats_queues) {
+ DP_ERR(p_hwfn->cdev,
+ "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
+ in_params->stats_queue, max_stats_queues);
+ return NULL;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return NULL;
+
+ rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
+ qp->qpid = ((0xFF << 16) | qp->icid);
+
+ DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
+
+ if (rc) {
+ kfree(qp);
+ return NULL;
+ }
+
+ qp->cur_state = QED_ROCE_QP_STATE_RESET;
+ qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
+ qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
+ qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
+ qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
+ qp->use_srq = in_params->use_srq;
+ qp->signal_all = in_params->signal_all;
+ qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
+ qp->pd = in_params->pd;
+ qp->dpi = in_params->dpi;
+ qp->sq_cq_id = in_params->sq_cq_id;
+ qp->sq_num_pages = in_params->sq_num_pages;
+ qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
+ qp->rq_cq_id = in_params->rq_cq_id;
+ qp->rq_num_pages = in_params->rq_num_pages;
+ qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
+ qp->srq_id = in_params->srq_id;
+ qp->req_offloaded = false;
+ qp->resp_offloaded = false;
+ qp->e2e_flow_control_en = qp->use_srq ? false : true;
+ qp->stats_queue = in_params->stats_queue;
+
+ out_params->icid = qp->icid;
+ out_params->qp_id = qp->qpid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
+ return qp;
+}
+
+static int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ enum qed_roce_qp_state prev_state;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
+ qp->icid, params->new_state);
+
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
+ qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
+ qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
+ qp->incoming_atomic_en = params->incoming_atomic_en;
+ }
+
+ /* Update QP structure with the updated values */
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
+ qp->roce_mode = params->roce_mode;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
+ qp->pkey = params->pkey;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
+ qp->e2e_flow_control_en = params->e2e_flow_control_en;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
+ qp->dest_qp = params->dest_qp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
+ /* Indicates that the following parameters have changed:
+ * Traffic class, flow label, hop limit, source GID,
+ * destination GID, loopback indicator
+ */
+ qp->traffic_class_tos = params->traffic_class_tos;
+ qp->flow_label = params->flow_label;
+ qp->hop_limit_ttl = params->hop_limit_ttl;
+
+ qp->sgid = params->sgid;
+ qp->dgid = params->dgid;
+ qp->udp_src_port = 0;
+ qp->vlan_id = params->vlan_id;
+ qp->mtu = params->mtu;
+ qp->lb_indication = params->lb_indication;
+ memcpy((u8 *)&qp->remote_mac_addr[0],
+ (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
+ if (params->use_local_mac) {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&params->local_mac_addr[0], ETH_ALEN);
+ } else {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+ }
+ }
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
+ qp->rq_psn = params->rq_psn;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
+ qp->sq_psn = params->sq_psn;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
+ qp->max_rd_atomic_req = params->max_rd_atomic_req;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
+ qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
+ qp->ack_timeout = params->ack_timeout;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
+ qp->retry_cnt = params->retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
+ qp->rnr_retry_cnt = params->rnr_retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
+ qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
+
+ qp->sqd_async = params->sqd_async;
+
+ prev_state = qp->cur_state;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
+ qp->cur_state = params->new_state;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
+ qp->cur_state);
+ }
+
+ rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
+ return rc;
+}
+
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_register_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ enum rdma_tid_type tid_type;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (p_hwfn->p_rdma_info->last_tid < params->itid)
+ p_hwfn->p_rdma_info->last_tid = params->itid;
+
+ p_ramrod = &p_ent->ramrod.rdma_register_tid;
+
+ p_ramrod->flags = 0;
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+ params->pbl_two_level);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+
+ /* Don't initialize D/C field, as it may override other bits. */
+ if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
+ params->page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
+ params->remote_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
+ params->remote_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
+ params->remote_atomic);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
+ params->local_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
+ params->mw_bind);
+
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
+ params->pbl_page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
+
+ switch (params->tid_type) {
+ case QED_RDMA_TID_REGISTERED_MR:
+ tid_type = RDMA_TID_REGISTERED_MR;
+ break;
+ case QED_RDMA_TID_FMR:
+ tid_type = RDMA_TID_FMR;
+ break;
+ case QED_RDMA_TID_MW_TYPE1:
+ tid_type = RDMA_TID_MW_TYPE1;
+ break;
+ case QED_RDMA_TID_MW_TYPE2A:
+ tid_type = RDMA_TID_MW_TYPE2A;
+ break;
+ default:
+ rc = -EINVAL;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
+
+ p_ramrod->itid = cpu_to_le32(params->itid);
+ p_ramrod->key = params->key;
+ p_ramrod->pd = cpu_to_le16(params->pd);
+ p_ramrod->length_hi = (u8)(params->length >> 32);
+ p_ramrod->length_lo = DMA_LO_LE(params->length);
+ if (params->zbva) {
+ /* Lower 32 bits of the registered MR address.
+ * In case of zero based MR, will hold FBO
+ */
+ p_ramrod->va.hi = 0;
+ p_ramrod->va.lo = cpu_to_le32(params->fbo);
+ } else {
+ DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
+ }
+ DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
+
+ /* DIF */
+ if (params->dif_enabled) {
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
+ DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
+ params->dif_error_addr);
+ DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ return rc;
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_deregister_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
+ p_ramrod->itid = cpu_to_le32(itid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
+		/* The firmware reports that the TID is still in use; a NIG
+		 * drain is required before the ramrod can be resent.
+		 */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ rc = -EBUSY;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Drain failed\n");
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Resend the ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto,
+ &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to init sp-element\n");
+ return rc;
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Ramrod failed\n");
+ return rc;
+ }
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
+ fw_return_code);
+ return rc;
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
+ return rc;
+}
+
+static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+{
+ return QED_LEADING_HWFN(cdev);
+}
+
+bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
+{
+ bool result;
+
+	/* If RDMA info has not been allocated, there are naturally no QPs */
+ if (!p_hwfn->p_rdma_info)
+ return false;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ if (!p_hwfn->p_rdma_info->cid_map.bitmap)
+ result = false;
+ else
+ result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ return result;
+}
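
qed_bmap_is_empty() itself is outside this hunk; given the struct qed_bmap layout declared below in qed_rdma.h, it plausibly reduces to a bitmap_empty() check. A hypothetical sketch, not the driver's confirmed implementation:

/* Hypothetical sketch of qed_bmap_is_empty(), assuming only the fields
 * of struct qed_bmap and the <linux/bitmap.h> helpers.
 */
static bool example_bmap_is_empty(struct qed_bmap *bmap)
{
	return bitmap_empty(bmap->bitmap, bmap->max_count);
}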
+
+void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 val;
+
+ val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
+ DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
+ "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
+ val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
+}
+
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ p_hwfn->db_bar_no_edpm = true;
+
+ qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
+static int qed_rdma_start(void *rdma_cxt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_ptt *p_ptt;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "desired_cnq = %08x\n", params->desired_cnq);
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ goto err;
+
+ rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err1;
+
+ rc = qed_rdma_setup(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err2;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+
+err2:
+ qed_rdma_free(p_hwfn);
+err1:
+ qed_ptt_release(p_hwfn, p_ptt);
+err:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_init(struct qed_dev *cdev,
+ struct qed_rdma_start_in_params *params)
+{
+ return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
+}
+
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to acquire PTT\n");
+ return -EINVAL;
+ }
+
+ if (old_mac_address)
+ qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
+ if (new_mac_address)
+ rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to add MAC filter\n");
+
+ return rc;
+}
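
Since a NULL old_mac_address skips the removal and a NULL new_mac_address skips the addition, this single entry point covers add, remove, and replace. A hedged caller sketch (the wrapper name is illustrative; real consumers reach this through the ll2_set_mac_filter op in the table below):

/* Illustrative only: swap the RoCE LL2 MAC filter when the bound
 * netdev address changes.
 */
static int example_replace_roce_mac(const struct qed_rdma_ops *ops,
				    struct qed_dev *cdev,
				    u8 *old_mac, u8 *new_mac)
{
	return ops->ll2_set_mac_filter(cdev, old_mac, new_mac);
}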
+
+static const struct qed_rdma_ops qed_rdma_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_rdma_dev_info,
+ .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
+ .rdma_init = &qed_rdma_init,
+ .rdma_add_user = &qed_rdma_add_user,
+ .rdma_remove_user = &qed_rdma_remove_user,
+ .rdma_stop = &qed_rdma_stop,
+ .rdma_query_port = &qed_rdma_query_port,
+ .rdma_query_device = &qed_rdma_query_device,
+ .rdma_get_start_sb = &qed_rdma_get_sb_start,
+ .rdma_get_rdma_int = &qed_rdma_get_int,
+ .rdma_set_rdma_int = &qed_rdma_set_int,
+ .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
+ .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
+ .rdma_alloc_pd = &qed_rdma_alloc_pd,
+ .rdma_dealloc_pd = &qed_rdma_free_pd,
+ .rdma_create_cq = &qed_rdma_create_cq,
+ .rdma_destroy_cq = &qed_rdma_destroy_cq,
+ .rdma_create_qp = &qed_rdma_create_qp,
+ .rdma_modify_qp = &qed_rdma_modify_qp,
+ .rdma_query_qp = &qed_rdma_query_qp,
+ .rdma_destroy_qp = &qed_rdma_destroy_qp,
+ .rdma_alloc_tid = &qed_rdma_alloc_tid,
+ .rdma_free_tid = &qed_rdma_free_tid,
+ .rdma_register_tid = &qed_rdma_register_tid,
+ .rdma_deregister_tid = &qed_rdma_deregister_tid,
+ .ll2_acquire_connection = &qed_ll2_acquire_connection,
+ .ll2_establish_connection = &qed_ll2_establish_connection,
+ .ll2_terminate_connection = &qed_ll2_terminate_connection,
+ .ll2_release_connection = &qed_ll2_release_connection,
+ .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
+ .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
+ .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
+ .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+ .ll2_get_stats = &qed_ll2_get_stats,
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
+{
+ return &qed_rdma_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_rdma_ops);
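
A sketch of how an upper-layer driver (qedr is the in-tree consumer) might bind to the exported table; the probe fragment and its names are illustrative, not qedr's actual code:

#include <linux/qed/qed_rdma_if.h>

static const struct qed_rdma_ops *qed_ops;

/* Illustrative probe fragment: fetch the ops table once, then query
 * device capabilities through it.
 */
static int example_rdma_probe(struct qed_dev *cdev)
{
	struct qed_dev_rdma_info dev_info;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops)
		return -ENODEV;

	return qed_ops->fill_dev_info(cdev, &dev_info);
}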
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
new file mode 100644
index 000000000000..d91e5c4069a6
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -0,0 +1,201 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_RDMA_H
+#define _QED_RDMA_H
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_rdma_if.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_roce.h"
+
+#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
+#define QED_RDMA_MAX_P_KEY (1)
+#define QED_RDMA_MAX_WQE (0x7FFF)
+#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
+#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
+#define QED_RDMA_ACK_DELAY (15)
+#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
+#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
+#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
+/* Add 1 for header element */
+#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
+#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
+#define QED_RDMA_MAX_SRQS (32 * 1024)
+
+#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
+#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
+
+enum qed_rdma_toggle_bit {
+ QED_RDMA_TOGGLE_BIT_CLEAR = 0,
+ QED_RDMA_TOGGLE_BIT_SET = 1
+};
+
+#define QED_RDMA_MAX_BMAP_NAME (10)
+struct qed_bmap {
+ unsigned long *bitmap;
+ u32 max_count;
+ char name[QED_RDMA_MAX_BMAP_NAME];
+};
+
+struct qed_rdma_info {
+ /* spin lock to protect bitmaps */
+ spinlock_t lock;
+
+ struct qed_bmap cq_map;
+ struct qed_bmap pd_map;
+ struct qed_bmap tid_map;
+ struct qed_bmap qp_map;
+ struct qed_bmap srq_map;
+ struct qed_bmap cid_map;
+ struct qed_bmap real_cid_map;
+ struct qed_bmap dpi_map;
+ struct qed_bmap toggle_bits;
+ struct qed_rdma_events events;
+ struct qed_rdma_device *dev;
+ struct qed_rdma_port *port;
+ u32 last_tid;
+ u8 num_cnqs;
+ u32 num_qps;
+ u32 num_mrs;
+ u16 queue_zone_base;
+ u16 max_queue_zones;
+ enum protocol_type proto;
+};
+
+struct qed_rdma_qp {
+ struct regpair qp_handle;
+ struct regpair qp_handle_async;
+ u32 qpid;
+ u16 icid;
+ enum qed_roce_qp_state cur_state;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+
+ u16 pd;
+ u16 pkey;
+ u32 dest_qp;
+ u16 mtu;
+ u16 srq_id;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u16 dpi;
+ u32 flow_label;
+ bool lb_indication;
+ u16 vlan_id;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ enum roce_mode roce_mode;
+ u16 udp_src_port;
+ u8 stats_queue;
+
+	/* requester */
+ u8 max_rd_atomic_req;
+ u32 sq_psn;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ dma_addr_t sq_pbl_ptr;
+ void *orq;
+ dma_addr_t orq_phys_addr;
+ u8 orq_num_pages;
+ bool req_offloaded;
+
+ /* responder */
+ u8 max_rd_atomic_resp;
+ u32 rq_psn;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ dma_addr_t rq_pbl_ptr;
+ void *irq;
+ dma_addr_t irq_phys_addr;
+ u8 irq_num_pages;
+ bool resp_offloaded;
+ u32 cq_prod;
+
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+
+ void *shared_queue;
+ dma_addr_t shared_queue_phys_addr;
+};
+
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#else
+static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+#endif
+
+int
+qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count, char *name);
+
+void
+qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, bool check);
+
+int
+qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num);
+
+void
+qed_bmap_set_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+void
+qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+int
+qed_bmap_test_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac);
+
+bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn);
+#endif
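
The bitmap helpers declared above are always driven under p_rdma_info->lock; a minimal usage sketch mirroring the PD-allocation pattern seen elsewhere in this patch:

/* Sketch only: allocate an ID from a qed_bmap under the rdma_info lock,
 * mirroring qed_rdma_alloc_pd().
 */
static int example_alloc_pd_id(struct qed_hwfn *p_hwfn, u32 *id)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	return rc;
}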
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 7e4639c9207a..0cdb4337b3a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -1564,6 +1564,12 @@
#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+#define NIG_REG_TX_EDPM_CTRL 0x501f0cUL
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN (0x1 << 0)
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN_SHIFT 0
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN (0xff << 1)
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1
+
#define PRS_REG_SEARCH_GFT 0x1f11bcUL
#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
#define PRS_REG_GFT_CAM 0x1f1100UL
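
The new NIG_REG_TX_EDPM_CTRL definitions follow the file's raw mask/shift convention (each mask already includes its shift). Composing a value for the register might look like this sketch; actual writes go through qed_wr():

/* Illustrative only: enable EDPM (bit 0) and all eight traffic classes
 * (bits 8:1), then write the control register.
 */
u32 edpm_ctrl = 0;

edpm_ctrl |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;
edpm_ctrl |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN;
qed_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, edpm_ctrl);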
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 4bc2f6c47f69..e53adc3d009b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -35,11 +35,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
#include <linux/io.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -48,10 +44,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
-#include <linux/tcp.h>
-#include <linux/bitops.h>
-#include <linux/qed/qed_roce_if.h>
-#include <linux/qed/qed_roce_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
@@ -61,19 +53,21 @@
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
-#include "qed_sp.h"
+#include <linux/qed/qed_rdma_if.h>
+#include "qed_rdma.h"
#include "qed_roce.h"
-#include "qed_ll2.h"
-#include <linux/qed/qed_ll2_if.h>
+#include "qed_sp.h"
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
-void qed_roce_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code, union rdma_eqe_data *rdma_data)
+static int
+qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ u16 echo, union event_ring_data *data, u8 fw_return_code)
{
if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
u16 icid =
- (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+ (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
/* icid release in this async event can occur only if the icid
* was offloaded to the FW. In case it wasn't offloaded this is
@@ -85,290 +79,15 @@ void qed_roce_async_event(struct qed_hwfn *p_hwfn,
events->affiliated_event(p_hwfn->p_rdma_info->events.context,
fw_event_code,
- &rdma_data->async_handle);
- }
-}
-
-static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 max_count, char *name)
-{
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
-
- bmap->max_count = max_count;
-
- bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
- GFP_KERNEL);
- if (!bmap->bitmap) {
- DP_NOTICE(p_hwfn,
- "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
- return -ENOMEM;
- }
-
- snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
- return 0;
-}
-
-static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 *id_num)
-{
- *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
- if (*id_num >= bmap->max_count)
- return -EINVAL;
-
- __set_bit(*id_num, bmap->bitmap);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
- bmap->name, *id_num);
-
- return 0;
-}
-
-static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 id_num)
-{
- if (id_num >= bmap->max_count)
- return;
-
- __set_bit(id_num, bmap->bitmap);
-}
-
-static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 id_num)
-{
- bool b_acquired;
-
- if (id_num >= bmap->max_count)
- return;
-
- b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
- if (!b_acquired) {
- DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
- bmap->name, id_num);
- return;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
- bmap->name, id_num);
-}
-
-static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 id_num)
-{
- if (id_num >= bmap->max_count)
- return -1;
-
- return test_bit(id_num, bmap->bitmap);
-}
-
-static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
-{
- /* First sb id for RoCE is after all the l2 sb */
- return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
-}
-
-static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_rdma_info *p_rdma_info;
- u32 num_cons, num_tasks;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
-
- /* Allocate a struct with current pf rdma info */
- p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
- if (!p_rdma_info) {
- DP_NOTICE(p_hwfn,
- "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
- rc);
- return rc;
- }
-
- p_hwfn->p_rdma_info = p_rdma_info;
- p_rdma_info->proto = PROTOCOLID_ROCE;
-
- num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
- NULL);
-
- p_rdma_info->num_qps = num_cons / 2;
-
- num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
-
- /* Each MR uses a single task */
- p_rdma_info->num_mrs = num_tasks;
-
- /* Queue zone lines are shared between RoCE and L2 in such a way that
- * they can be used by each without obstructing the other.
- */
- p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
- p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
-
- /* Allocate a struct with device params and fill it */
- p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
- if (!p_rdma_info->dev) {
- DP_NOTICE(p_hwfn,
- "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
- rc);
- goto free_rdma_info;
- }
-
- /* Allocate a struct with port params and fill it */
- p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
- if (!p_rdma_info->port) {
- DP_NOTICE(p_hwfn,
- "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
- rc);
- goto free_rdma_dev;
- }
-
- /* Allocate bit map for pd's */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
- "PD");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate pd_map, rc = %d\n",
- rc);
- goto free_rdma_port;
- }
-
- /* Allocate DPI bitmap */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
- p_hwfn->dpi_count, "DPI");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate DPI bitmap, rc = %d\n", rc);
- goto free_pd_map;
- }
-
- /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
- * twice the number of QPs.
- */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
- p_rdma_info->num_qps * 2, "CQ");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate cq bitmap, rc = %d\n", rc);
- goto free_dpi_map;
- }
-
- /* Allocate bitmap for toggle bit for cq icids
- * We toggle the bit every time we create or resize cq for a given icid.
- * The maximum number of CQs is bounded to twice the number of QPs.
- */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
- p_rdma_info->num_qps * 2, "Toggle");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate toogle bits, rc = %d\n", rc);
- goto free_cq_map;
- }
-
- /* Allocate bitmap for itids */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
- p_rdma_info->num_mrs, "MR");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate itids bitmaps, rc = %d\n", rc);
- goto free_toggle_map;
- }
-
- /* Allocate bitmap for cids used for qps. */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
- "CID");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate cid bitmap, rc = %d\n", rc);
- goto free_tid_map;
+ (void *)&data->rdma_data.async_handle);
}
- /* Allocate bitmap for cids used for responders/requesters. */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
- "REAL_CID");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate real cid bitmap, rc = %d\n", rc);
- goto free_cid_map;
- }
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
return 0;
-
-free_cid_map:
- kfree(p_rdma_info->cid_map.bitmap);
-free_tid_map:
- kfree(p_rdma_info->tid_map.bitmap);
-free_toggle_map:
- kfree(p_rdma_info->toggle_bits.bitmap);
-free_cq_map:
- kfree(p_rdma_info->cq_map.bitmap);
-free_dpi_map:
- kfree(p_rdma_info->dpi_map.bitmap);
-free_pd_map:
- kfree(p_rdma_info->pd_map.bitmap);
-free_rdma_port:
- kfree(p_rdma_info->port);
-free_rdma_dev:
- kfree(p_rdma_info->dev);
-free_rdma_info:
- kfree(p_rdma_info);
-
- return rc;
}
-static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, bool check)
-{
- int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
- int last_line = bmap->max_count / (64 * 8);
- int last_item = last_line * 8 +
- DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
- u64 *pmap = (u64 *)bmap->bitmap;
- int line, item, offset;
- u8 str_last_line[200] = { 0 };
-
- if (!weight || !check)
- goto end;
-
- DP_NOTICE(p_hwfn,
- "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
- bmap->name, bmap->max_count, weight);
-
- /* print aligned non-zero lines, if any */
- for (item = 0, line = 0; line < last_line; line++, item += 8)
- if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
- DP_NOTICE(p_hwfn,
- "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- line,
- pmap[item],
- pmap[item + 1],
- pmap[item + 2],
- pmap[item + 3],
- pmap[item + 4],
- pmap[item + 5],
- pmap[item + 6], pmap[item + 7]);
-
- /* print last unaligned non-zero line, if any */
- if ((bmap->max_count % (64 * 8)) &&
- (bitmap_weight((unsigned long *)&pmap[item],
- bmap->max_count - item * 64))) {
- offset = sprintf(str_last_line, "line 0x%04x: ", line);
- for (; item < last_item; item++)
- offset += sprintf(str_last_line + offset,
- "0x%016llx ", pmap[item]);
- DP_NOTICE(p_hwfn, "%s\n", str_last_line);
- }
-
-end:
- kfree(bmap->bitmap);
- bmap->bitmap = NULL;
-}
-
-static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+void qed_roce_stop(struct qed_hwfn *p_hwfn)
{
struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
int wait_count = 0;
 	/* when destroying a RoCE QP the control is returned to the user after
@@ -383,781 +102,7 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
break;
}
}
-
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
-
- kfree(p_rdma_info->port);
- kfree(p_rdma_info->dev);
-
- kfree(p_rdma_info);
-}
-
-static void qed_rdma_free(struct qed_hwfn *p_hwfn)
-{
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
-
- qed_rdma_resc_free(p_hwfn);
-}
-
-static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
-{
- guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
- guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
- guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
- guid[3] = 0xff;
- guid[4] = 0xfe;
- guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
- guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
- guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
-}
-
-static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_rdma_events *events;
-
- events = &p_hwfn->p_rdma_info->events;
-
- events->unaffiliated_event = params->events->unaffiliated_event;
- events->affiliated_event = params->events->affiliated_event;
- events->context = params->events->context;
-}
-
-static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
- struct qed_dev *cdev = p_hwfn->cdev;
- u32 pci_status_control;
- u32 num_qps;
-
- /* Vendor specific information */
- dev->vendor_id = cdev->vendor_id;
- dev->vendor_part_id = cdev->device_id;
- dev->hw_ver = 0;
- dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
- (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
-
- qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
- dev->node_guid = dev->sys_image_guid;
-
- dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
- RDMA_MAX_SGE_PER_RQ_WQE);
-
- if (cdev->rdma_max_sge)
- dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
-
- dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
-
- dev->max_inline = (cdev->rdma_max_inline) ?
- min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
- dev->max_inline;
-
- dev->max_wqe = QED_RDMA_MAX_WQE;
- dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
-
- /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
- * it is up-aligned to 16 and then to ILT page size within qed cxt.
- * This is OK in terms of ILT but we don't want to configure the FW
- * above its abilities
- */
- num_qps = ROCE_MAX_QPS;
- num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
- dev->max_qp = num_qps;
-
- /* CQs uses the same icids that QPs use hence they are limited by the
- * number of icids. There are two icids per QP.
- */
- dev->max_cq = num_qps * 2;
-
- /* The number of mrs is smaller by 1 since the first is reserved */
- dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
- dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
-
- /* The maximum CQE capacity per CQ supported.
- * max number of cqes will be in two layer pbl,
- * 8 is the pointer size in bytes
- * 32 is the size of cq element in bytes
- */
- if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
- dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
- else
- dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
-
- dev->max_mw = 0;
- dev->max_fmr = QED_RDMA_MAX_FMR;
- dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
- dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
- dev->max_pkey = QED_RDMA_MAX_P_KEY;
-
- dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
- (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
- dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
- RDMA_REQ_RD_ATOMIC_ELM_SIZE;
- dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
- p_hwfn->p_rdma_info->num_qps;
- dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
- dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
- dev->max_pd = RDMA_MAX_PDS;
- dev->max_ah = p_hwfn->p_rdma_info->num_qps;
- dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
-
-	/* Set capabilities */
- dev->dev_caps = 0;
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
-
- /* Check atomic operations support in PCI configuration space. */
- pci_read_config_dword(cdev->pdev,
- cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
- &pci_status_control);
-
- if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
-}
-
-static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
-{
- struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
- struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
-
- port->port_state = p_hwfn->mcp_info->link_output.link_up ?
- QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
-
- port->max_msg_size = min_t(u64,
- (dev->max_mr_mw_fmr_size *
- p_hwfn->cdev->rdma_max_sge),
- BIT(31));
-
- port->pkey_bad_counter = 0;
-}
-
-static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- u32 ll2_ethertype_en;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
- p_hwfn->b_rdma_enabled_in_prs = false;
-
- qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
-
- p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
-
- /* We delay writing to this reg until first cid is allocated. See
- * qed_cxt_dynamic_ilt_alloc function for more details
- */
- ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
- qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
- (ll2_ethertype_en | 0x01));
-
- if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
- DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
- return -EINVAL;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
- return 0;
-}
-
-static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
- struct qed_rdma_start_in_params *params,
- struct qed_ptt *p_ptt)
-{
- struct rdma_init_func_ramrod_data *p_ramrod;
- struct qed_rdma_cnq_params *p_cnq_pbl_list;
- struct rdma_init_func_hdr *p_params_header;
- struct rdma_cnq_params *p_cnq_params;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- u32 cnq_id, sb_id;
- u16 igu_sb_id;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
-
- /* Save the number of cnqs for the function close ramrod */
- p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- return rc;
-
- p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
-
- p_params_header = &p_ramrod->params_header;
- p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
- QED_RDMA_CNQ_RAM);
- p_params_header->num_cnqs = params->desired_cnq;
-
- if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
- p_params_header->cq_ring_mode = 1;
- else
- p_params_header->cq_ring_mode = 0;
-
- for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
- sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
- igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
- p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
- p_cnq_params = &p_ramrod->cnq_params[cnq_id];
- p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
-
- p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
- p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
-
- DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
- p_cnq_pbl_list->pbl_ptr);
-
- /* we assume here that cnq_id and qz_offset are the same */
- p_cnq_params->queue_zone_num =
- cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
- cnq_id);
- }
-
- return qed_spq_post(p_hwfn, p_ent, NULL);
-}
-
-static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
- if (rc)
- goto out;
-
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
-out:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
-{
- struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
-
- /* The first DPI is reserved for the Kernel */
- __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
-
- /* Tid 0 will be used as the key for "reserved MR".
- * The driver should allocate memory for it so it can be loaded but no
- * ramrod should be passed on it.
- */
- qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
- if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
- DP_NOTICE(p_hwfn,
- "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_rdma_start_in_params *params)
-{
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
-
- spin_lock_init(&p_hwfn->p_rdma_info->lock);
-
- qed_rdma_init_devinfo(p_hwfn, params);
- qed_rdma_init_port(p_hwfn);
- qed_rdma_init_events(p_hwfn, params);
-
- rc = qed_rdma_reserve_lkey(p_hwfn);
- if (rc)
- return rc;
-
- rc = qed_rdma_init_hw(p_hwfn, p_ptt);
- if (rc)
- return rc;
-
- return qed_rdma_start_fw(p_hwfn, params, p_ptt);
-}
-
-static int qed_rdma_stop(void *rdma_cxt)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_close_func_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- struct qed_ptt *p_ptt;
- u32 ll2_ethertype_en;
- int rc = -EBUSY;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
- return rc;
- }
-
- /* Disable RoCE search */
- qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
- p_hwfn->b_rdma_enabled_in_prs = false;
-
- qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
-
- ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
-
- qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
- (ll2_ethertype_en & 0xFFFE));
-
- qed_ptt_release(p_hwfn, p_ptt);
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- /* Stop RoCE */
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto out;
-
- p_ramrod = &p_ent->ramrod.rdma_close_func;
-
- p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
- p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
-
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
-out:
- qed_rdma_free(p_hwfn);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- u32 dpi_start_offset;
- u32 returned_id = 0;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
-
- /* Allocate DPI */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
- &returned_id);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-
- out_params->dpi = (u16)returned_id;
-
- /* Calculate the corresponding DPI address */
- dpi_start_offset = p_hwfn->dpi_start_offset;
-
- out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
- dpi_start_offset +
- ((out_params->dpi) * p_hwfn->dpi_size));
-
- out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
- dpi_start_offset +
- ((out_params->dpi) * p_hwfn->dpi_size);
-
- out_params->dpi_size = p_hwfn->dpi_size;
- out_params->wid_count = p_hwfn->wid_count;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
- return rc;
-}
-
-static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
-
- /* Link may have changed */
- p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
- QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
-
- p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
-
- p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
-
- return p_port;
-}
-
-static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
-
- /* Return struct with device parameters */
- return p_hwfn->p_rdma_info->dev;
-}
-
-static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
-static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
-{
- struct qed_hwfn *p_hwfn;
- u16 qz_num;
- u32 addr;
-
- p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
- DP_NOTICE(p_hwfn,
- "queue zone offset %d is too large (max is %d)\n",
- qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
- return;
- }
-
- qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
- addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
-
- REG_WR16(p_hwfn, addr, prod);
-
- /* keep prod updates ordered */
- wmb();
-}
-
-static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
- struct qed_dev_rdma_info *info)
-{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
-
- memset(info, 0, sizeof(*info));
-
- info->rdma_type = QED_RDMA_TYPE_ROCE;
- info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
-
- qed_fill_dev_info(cdev, &info->common);
-
- return 0;
-}
-
-static int qed_rdma_get_sb_start(struct qed_dev *cdev)
-{
- int feat_num;
-
- if (cdev->num_hwfns > 1)
- feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
- else
- feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
- cdev->num_hwfns;
-
- return feat_num;
-}
-
-static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
-{
- int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
- int n_msix = cdev->int_params.rdma_msix_cnt;
-
- return min_t(int, n_cnq, n_msix);
-}
-
-static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
-{
- int limit = 0;
-
- /* Mark the fastpath as free/used */
- cdev->int_params.fp_initialized = cnt ? true : false;
-
- if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
- DP_ERR(cdev,
- "qed roce supports only MSI-X interrupts (detected %d).\n",
- cdev->int_params.out.int_mode);
- return -EINVAL;
- } else if (cdev->int_params.fp_msix_cnt) {
- limit = cdev->int_params.rdma_msix_cnt;
- }
-
- if (!limit)
- return -ENOMEM;
-
- return min_t(int, cnt, limit);
-}
-
-static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
-{
- memset(info, 0, sizeof(*info));
-
- if (!cdev->int_params.fp_initialized) {
- DP_INFO(cdev,
- "Protocol driver requested interrupt information, but its support is not yet configured\n");
- return -EINVAL;
- }
-
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
- int msix_base = cdev->int_params.rdma_msix_base;
-
- info->msix_cnt = cdev->int_params.rdma_msix_cnt;
- info->msix = &cdev->int_params.msix_table[msix_base];
-
- DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
- info->msix_cnt, msix_base);
- }
-
- return 0;
-}
-
-static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- u32 returned_id;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
-
- /* Allocates an unused protection domain */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->pd_map, &returned_id);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-
- *pd = (u16)returned_id;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
- return rc;
-}
-
-static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
-
- /* Returns a previously allocated protection domain for reuse */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
-static enum qed_rdma_toggle_bit
-qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
-{
- struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
- enum qed_rdma_toggle_bit toggle_bit;
- u32 bmap_id;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
-
-	/* the function toggles the bit that is related to a given icid
- * and returns the new toggle bit's value
- */
- bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
-
- spin_lock_bh(&p_info->lock);
- toggle_bit = !test_and_change_bit(bmap_id,
- p_info->toggle_bits.bitmap);
- spin_unlock_bh(&p_info->lock);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
- toggle_bit);
-
- return toggle_bit;
-}
-
-static int qed_rdma_create_cq(void *rdma_cxt,
- struct qed_rdma_create_cq_in_params *params,
- u16 *icid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
- struct rdma_create_cq_ramrod_data *p_ramrod;
- enum qed_rdma_toggle_bit toggle_bit;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- u32 returned_id, start_cid;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
- params->cq_handle_hi, params->cq_handle_lo);
-
- /* Allocate icid */
- spin_lock_bh(&p_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
- spin_unlock_bh(&p_info->lock);
-
- if (rc) {
- DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
- return rc;
- }
-
- start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
- p_info->proto);
- *icid = returned_id + start_cid;
-
- /* Check if icid requires a page allocation */
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
- if (rc)
- goto err;
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = *icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- /* Send create CQ ramrod */
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_CREATE_CQ,
- p_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_create_cq;
-
- p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
- p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
- p_ramrod->dpi = cpu_to_le16(params->dpi);
- p_ramrod->is_two_level_pbl = params->pbl_two_level;
- p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
- DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
- p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
- p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
- params->cnq_id;
- p_ramrod->int_timeout = params->int_timeout;
-
- /* toggle the bit for every resize or create cq for a given icid */
- toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
-
- p_ramrod->toggle_bit = toggle_bit;
-
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc) {
- /* restore toggle bit */
- qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
- goto err;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
- return rc;
-
-err:
- /* release allocated icid */
- spin_lock_bh(&p_info->lock);
- qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
- spin_unlock_bh(&p_info->lock);
- DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
-
- return rc;
-}
-
-static int
-qed_rdma_destroy_cq(void *rdma_cxt,
- struct qed_rdma_destroy_cq_in_params *in_params,
- struct qed_rdma_destroy_cq_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_destroy_cq_output_params *p_ramrod_res;
- struct rdma_destroy_cq_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- dma_addr_t ramrod_res_phys;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
-
- p_ramrod_res =
- (struct rdma_destroy_cq_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_destroy_cq_output_params),
- &ramrod_res_phys, GFP_KERNEL);
- if (!p_ramrod_res) {
- DP_NOTICE(p_hwfn,
- "qed destroy cq failed: cannot allocate memory (ramrod)\n");
- return rc;
- }
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = in_params->icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- /* Send destroy CQ ramrod */
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_DESTROY_CQ,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
- DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
-
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc)
- goto err;
-
- out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
-
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_destroy_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- /* Free icid */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
-
- qed_bmap_release_id(p_hwfn,
- &p_hwfn->p_rdma_info->cq_map,
- (in_params->icid -
- qed_cxt_get_proto_cid_start(p_hwfn,
- p_hwfn->
- p_rdma_info->proto)));
-
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
- return rc;
-
-err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_destroy_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- return rc;
-}
-
-static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
-{
- p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
- p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
- p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
}
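
qed_rdma_set_fw_mac(), whose body moves out of this file (its declaration now sits in qed_rdma.h), packs the six MAC bytes big-endian into three 16-bit words before the little-endian store. A worked example:

u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
u16 fw_mac[3];

qed_rdma_set_fw_mac(fw_mac, mac);
/* fw_mac now holds cpu_to_le16(0x0011), cpu_to_le16(0x2233) and
 * cpu_to_le16(0x4455).
 */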
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
@@ -1211,7 +156,7 @@ void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 responder_icid;
@@ -1871,9 +816,9 @@ err:
return rc;
}
-static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
struct roce_query_qp_req_output_params *p_req_ramrod_res;
@@ -2012,7 +957,7 @@ err_resp:
return rc;
}
-static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
@@ -2051,138 +996,10 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
return 0;
}
-static int qed_rdma_query_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
-
- /* The following fields are filled in from qp and not FW as they can't
- * be modified by FW
- */
- out_params->mtu = qp->mtu;
- out_params->dest_qp = qp->dest_qp;
- out_params->incoming_atomic_en = qp->incoming_atomic_en;
- out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
- out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
- out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
- out_params->dgid = qp->dgid;
- out_params->flow_label = qp->flow_label;
- out_params->hop_limit_ttl = qp->hop_limit_ttl;
- out_params->traffic_class_tos = qp->traffic_class_tos;
- out_params->timeout = qp->ack_timeout;
- out_params->rnr_retry = qp->rnr_retry_cnt;
- out_params->retry_cnt = qp->retry_cnt;
- out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
- out_params->pkey_index = 0;
- out_params->max_rd_atomic = qp->max_rd_atomic_req;
- out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
- out_params->sqd_async = qp->sqd_async;
-
- rc = qed_roce_query_qp(p_hwfn, qp, out_params);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc = 0;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
-
- rc = qed_roce_destroy_qp(p_hwfn, qp);
-
- /* free qp params struct */
- kfree(qp);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
- return rc;
-}
-
-static struct qed_rdma_qp *
-qed_rdma_create_qp(void *rdma_cxt,
- struct qed_rdma_create_qp_in_params *in_params,
- struct qed_rdma_create_qp_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_rdma_qp *qp;
- u8 max_stats_queues;
- int rc;
-
- if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
- DP_ERR(p_hwfn->cdev,
- "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
- rdma_cxt, in_params, out_params);
- return NULL;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "qed rdma create qp called with qp_handle = %08x%08x\n",
- in_params->qp_handle_hi, in_params->qp_handle_lo);
-
- /* Some sanity checks... */
- max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
- if (in_params->stats_queue >= max_stats_queues) {
- DP_ERR(p_hwfn->cdev,
- "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
- in_params->stats_queue, max_stats_queues);
- return NULL;
- }
-
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
- return NULL;
- }
-
- rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
- qp->qpid = ((0xFF << 16) | qp->icid);
-
- DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
-
- if (rc) {
- kfree(qp);
- return NULL;
- }
-
- qp->cur_state = QED_ROCE_QP_STATE_RESET;
- qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
- qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
- qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
- qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
- qp->use_srq = in_params->use_srq;
- qp->signal_all = in_params->signal_all;
- qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
- qp->pd = in_params->pd;
- qp->dpi = in_params->dpi;
- qp->sq_cq_id = in_params->sq_cq_id;
- qp->sq_num_pages = in_params->sq_num_pages;
- qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
- qp->rq_cq_id = in_params->rq_cq_id;
- qp->rq_num_pages = in_params->rq_num_pages;
- qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
- qp->srq_id = in_params->srq_id;
- qp->req_offloaded = false;
- qp->resp_offloaded = false;
- qp->e2e_flow_control_en = qp->use_srq ? false : true;
- qp->stats_queue = in_params->stats_queue;
-
- out_params->icid = qp->icid;
- out_params->qp_id = qp->qpid;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
- return qp;
-}
-
-static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- enum qed_roce_qp_state prev_state,
- struct qed_rdma_modify_qp_in_params *params)
+int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params)
{
u32 num_invalidated_mw = 0, num_bound_mw = 0;
int rc = 0;
@@ -2287,327 +1104,6 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_rdma_modify_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- enum qed_roce_qp_state prev_state;
- int rc = 0;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
- qp->icid, params->new_state);
-
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
- qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
- qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
- qp->incoming_atomic_en = params->incoming_atomic_en;
- }
-
- /* Update QP structure with the updated values */
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
- qp->roce_mode = params->roce_mode;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
- qp->pkey = params->pkey;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
- qp->e2e_flow_control_en = params->e2e_flow_control_en;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
- qp->dest_qp = params->dest_qp;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
- /* Indicates that the following parameters have changed:
- * Traffic class, flow label, hop limit, source GID,
- * destination GID, loopback indicator
- */
- qp->traffic_class_tos = params->traffic_class_tos;
- qp->flow_label = params->flow_label;
- qp->hop_limit_ttl = params->hop_limit_ttl;
-
- qp->sgid = params->sgid;
- qp->dgid = params->dgid;
- qp->udp_src_port = 0;
- qp->vlan_id = params->vlan_id;
- qp->mtu = params->mtu;
- qp->lb_indication = params->lb_indication;
- memcpy((u8 *)&qp->remote_mac_addr[0],
- (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
- if (params->use_local_mac) {
- memcpy((u8 *)&qp->local_mac_addr[0],
- (u8 *)&params->local_mac_addr[0], ETH_ALEN);
- } else {
- memcpy((u8 *)&qp->local_mac_addr[0],
- (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
- }
- }
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
- qp->rq_psn = params->rq_psn;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
- qp->sq_psn = params->sq_psn;
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
- qp->max_rd_atomic_req = params->max_rd_atomic_req;
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
- qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
- qp->ack_timeout = params->ack_timeout;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
- qp->retry_cnt = params->retry_cnt;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
- qp->rnr_retry_cnt = params->rnr_retry_cnt;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
- qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
-
- qp->sqd_async = params->sqd_async;
-
- prev_state = qp->cur_state;
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
- qp->cur_state = params->new_state;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
- qp->cur_state);
- }
-
- rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
- return rc;
-}
-
-static int
-qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_register_tid_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- enum rdma_tid_type tid_type;
- u8 fw_return_code;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- if (p_hwfn->p_rdma_info->last_tid < params->itid)
- p_hwfn->p_rdma_info->last_tid = params->itid;
-
- p_ramrod = &p_ent->ramrod.rdma_register_tid;
-
- p_ramrod->flags = 0;
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
- params->pbl_two_level);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
-
- /* Don't initialize D/C field, as it may override other bits. */
- if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
- params->page_size_log - 12);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
- params->remote_read);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
- params->remote_write);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
- params->remote_atomic);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
- params->local_write);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
- params->mw_bind);
-
- SET_FIELD(p_ramrod->flags1,
- RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
- params->pbl_page_size_log - 12);
-
- SET_FIELD(p_ramrod->flags2,
- RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
-
- switch (params->tid_type) {
- case QED_RDMA_TID_REGISTERED_MR:
- tid_type = RDMA_TID_REGISTERED_MR;
- break;
- case QED_RDMA_TID_FMR:
- tid_type = RDMA_TID_FMR;
- break;
- case QED_RDMA_TID_MW_TYPE1:
- tid_type = RDMA_TID_MW_TYPE1;
- break;
- case QED_RDMA_TID_MW_TYPE2A:
- tid_type = RDMA_TID_MW_TYPE2A;
- break;
- default:
- rc = -EINVAL;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
- SET_FIELD(p_ramrod->flags1,
- RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
-
- p_ramrod->itid = cpu_to_le32(params->itid);
- p_ramrod->key = params->key;
- p_ramrod->pd = cpu_to_le16(params->pd);
- p_ramrod->length_hi = (u8)(params->length >> 32);
- p_ramrod->length_lo = DMA_LO_LE(params->length);
- if (params->zbva) {
- /* Lower 32 bits of the registered MR address.
- * In case of zero based MR, will hold FBO
- */
- p_ramrod->va.hi = 0;
- p_ramrod->va.lo = cpu_to_le32(params->fbo);
- } else {
- DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
- }
- DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
-
- /* DIF */
- if (params->dif_enabled) {
- SET_FIELD(p_ramrod->flags2,
- RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
- DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
- params->dif_error_addr);
- DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
- }
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc)
- return rc;
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- return -EINVAL;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_deregister_tid_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- struct qed_ptt *p_ptt;
- u8 fw_return_code;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
- p_ramrod->itid = cpu_to_le32(itid);
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- return -EINVAL;
- } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
- /* Bit indicating that the TID is in use and a NIG drain is
- * required before sending the ramrod again
- */
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- rc = -EBUSY;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to acquire PTT\n");
- return rc;
- }
-
- rc = qed_mcp_drain(p_hwfn, p_ptt);
- if (rc) {
- qed_ptt_release(p_hwfn, p_ptt);
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Drain failed\n");
- return rc;
- }
-
- qed_ptt_release(p_hwfn, p_ptt);
-
- /* Resend the ramrod */
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_DEREGISTER_MR,
- p_hwfn->p_rdma_info->proto,
- &init_data);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to init sp-element\n");
- return rc;
- }
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Ramrod failed\n");
- return rc;
- }
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
- fw_return_code);
- return rc;
- }
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
- return rc;
-}
-
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -2633,151 +1129,23 @@ static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
-{
- return QED_LEADING_HWFN(cdev);
-}
-
-static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 val;
-
- val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
+ u8 val;
- qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
- DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
- "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
- val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
-}
-
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- p_hwfn->db_bar_no_edpm = true;
+ /* If any QPs are already active, we want to disable DPM, since their
+ * context information predates the latest DCBx update. Otherwise
+ * enable it.
+ */
+ val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
+ p_hwfn->dcbx_no_edpm = (u8)val;
qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
-static int qed_rdma_start(void *rdma_cxt,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_ptt *p_ptt;
- int rc = -EBUSY;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "desired_cnq = %08x\n", params->desired_cnq);
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- goto err;
-
- rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
- if (rc)
- goto err1;
-
- rc = qed_rdma_setup(p_hwfn, p_ptt, params);
- if (rc)
- goto err2;
-
- qed_ptt_release(p_hwfn, p_ptt);
-
- return rc;
-
-err2:
- qed_rdma_free(p_hwfn);
-err1:
- qed_ptt_release(p_hwfn, p_ptt);
-err:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_init(struct qed_dev *cdev,
- struct qed_rdma_start_in_params *params)
-{
- return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
-}
-
-static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
-static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
- u8 *old_mac_address,
- u8 *new_mac_address)
+int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt;
- int rc = 0;
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- DP_ERR(cdev,
- "qed roce ll2 mac filter set: failed to acquire PTT\n");
- return -EINVAL;
- }
-
- if (old_mac_address)
- qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
- if (new_mac_address)
- rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
-
- qed_ptt_release(p_hwfn, p_ptt);
-
- if (rc)
- DP_ERR(cdev,
- "qed roce ll2 mac filter set: failed to add MAC filter\n");
-
- return rc;
+ return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
+ qed_roce_async_event);
}
-static const struct qed_rdma_ops qed_rdma_ops_pass = {
- .common = &qed_common_ops_pass,
- .fill_dev_info = &qed_fill_rdma_dev_info,
- .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
- .rdma_init = &qed_rdma_init,
- .rdma_add_user = &qed_rdma_add_user,
- .rdma_remove_user = &qed_rdma_remove_user,
- .rdma_stop = &qed_rdma_stop,
- .rdma_query_port = &qed_rdma_query_port,
- .rdma_query_device = &qed_rdma_query_device,
- .rdma_get_start_sb = &qed_rdma_get_sb_start,
- .rdma_get_rdma_int = &qed_rdma_get_int,
- .rdma_set_rdma_int = &qed_rdma_set_int,
- .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
- .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
- .rdma_alloc_pd = &qed_rdma_alloc_pd,
- .rdma_dealloc_pd = &qed_rdma_free_pd,
- .rdma_create_cq = &qed_rdma_create_cq,
- .rdma_destroy_cq = &qed_rdma_destroy_cq,
- .rdma_create_qp = &qed_rdma_create_qp,
- .rdma_modify_qp = &qed_rdma_modify_qp,
- .rdma_query_qp = &qed_rdma_query_qp,
- .rdma_destroy_qp = &qed_rdma_destroy_qp,
- .rdma_alloc_tid = &qed_rdma_alloc_tid,
- .rdma_free_tid = &qed_rdma_free_tid,
- .rdma_register_tid = &qed_rdma_register_tid,
- .rdma_deregister_tid = &qed_rdma_deregister_tid,
- .ll2_acquire_connection = &qed_ll2_acquire_connection,
- .ll2_establish_connection = &qed_ll2_establish_connection,
- .ll2_terminate_connection = &qed_ll2_terminate_connection,
- .ll2_release_connection = &qed_ll2_release_connection,
- .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
- .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
- .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
- .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
- .ll2_get_stats = &qed_ll2_get_stats,
-};
-
-const struct qed_rdma_ops *qed_get_rdma_ops(void)
-{
- return &qed_rdma_ops_pass;
-}
-EXPORT_SYMBOL(qed_get_rdma_ops);
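
The qed_rdma_ops_pass table removed above (apparently relocated rather than deleted, given the new qed_rdma.h include later in this patch) follows the usual kernel idiom of exporting one const ops structure through a getter instead of many symbols. A minimal userspace sketch of that idiom; every identifier below is illustrative, not part of the qed API:

#include <stdio.h>

struct rdma_ops {
        int  (*start)(void *ctx);
        void (*stop)(void *ctx);
};

static int demo_start(void *ctx)
{
        printf("start %p\n", ctx);
        return 0;
}

static void demo_stop(void *ctx)
{
        printf("stop %p\n", ctx);
}

/* The table stays private and immutable ... */
static const struct rdma_ops demo_ops = {
        .start = demo_start,
        .stop  = demo_stop,
};

/* ... and consumers reach it only through the getter. */
const struct rdma_ops *get_demo_ops(void)
{
        return &demo_ops;
}

int main(void)
{
        const struct rdma_ops *ops = get_demo_ops();
        int dummy;

        ops->start(&dummy);
        ops->stop(&dummy);
        return 0;
}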
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 94be3b5a39c4..f801f39fde61 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -32,148 +32,28 @@
#ifndef _QED_ROCE_H
#define _QED_ROCE_H
#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/qed/qed_if.h>
-#include <linux/qed/qed_roce_if.h>
-#include "qed.h"
-#include "qed_dev_api.h"
-#include "qed_hsi.h"
-#include "qed_ll2.h"
-#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
-#define QED_RDMA_MAX_P_KEY (1)
-#define QED_RDMA_MAX_WQE (0x7FFF)
-#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
-#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
-#define QED_RDMA_ACK_DELAY (15)
-#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
-#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
-#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
-/* Add 1 for header element */
-#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
-#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
-#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
-#define QED_RDMA_MAX_SRQS (32 * 1024)
-
-#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
-#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
-
-enum qed_rdma_toggle_bit {
- QED_RDMA_TOGGLE_BIT_CLEAR = 0,
- QED_RDMA_TOGGLE_BIT_SET = 1
-};
-
-#define QED_RDMA_MAX_BMAP_NAME (10)
-struct qed_bmap {
- unsigned long *bitmap;
- u32 max_count;
- char name[QED_RDMA_MAX_BMAP_NAME];
-};
-
-struct qed_rdma_info {
- /* spin lock to protect bitmaps */
- spinlock_t lock;
-
- struct qed_bmap cq_map;
- struct qed_bmap pd_map;
- struct qed_bmap tid_map;
- struct qed_bmap qp_map;
- struct qed_bmap srq_map;
- struct qed_bmap cid_map;
- struct qed_bmap real_cid_map;
- struct qed_bmap dpi_map;
- struct qed_bmap toggle_bits;
- struct qed_rdma_events events;
- struct qed_rdma_device *dev;
- struct qed_rdma_port *port;
- u32 last_tid;
- u8 num_cnqs;
- u32 num_qps;
- u32 num_mrs;
- u16 queue_zone_base;
- u16 max_queue_zones;
- enum protocol_type proto;
-};
-
-struct qed_rdma_qp {
- struct regpair qp_handle;
- struct regpair qp_handle_async;
- u32 qpid;
- u16 icid;
- enum qed_roce_qp_state cur_state;
- bool use_srq;
- bool signal_all;
- bool fmr_and_reserved_lkey;
-
- bool incoming_rdma_read_en;
- bool incoming_rdma_write_en;
- bool incoming_atomic_en;
- bool e2e_flow_control_en;
-
- u16 pd;
- u16 pkey;
- u32 dest_qp;
- u16 mtu;
- u16 srq_id;
- u8 traffic_class_tos;
- u8 hop_limit_ttl;
- u16 dpi;
- u32 flow_label;
- bool lb_indication;
- u16 vlan_id;
- u32 ack_timeout;
- u8 retry_cnt;
- u8 rnr_retry_cnt;
- u8 min_rnr_nak_timer;
- bool sqd_async;
- union qed_gid sgid;
- union qed_gid dgid;
- enum roce_mode roce_mode;
- u16 udp_src_port;
- u8 stats_queue;
-
- /* requester */
- u8 max_rd_atomic_req;
- u32 sq_psn;
- u16 sq_cq_id;
- u16 sq_num_pages;
- dma_addr_t sq_pbl_ptr;
- void *orq;
- dma_addr_t orq_phys_addr;
- u8 orq_num_pages;
- bool req_offloaded;
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#else
+static inline void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+#endif
- /* responder */
- u8 max_rd_atomic_resp;
- u32 rq_psn;
- u16 rq_cq_id;
- u16 rq_num_pages;
- dma_addr_t rq_pbl_ptr;
- void *irq;
- dma_addr_t irq_phys_addr;
- u8 irq_num_pages;
- bool resp_offloaded;
- u32 cq_prod;
+int qed_roce_setup(struct qed_hwfn *p_hwfn);
+void qed_roce_stop(struct qed_hwfn *p_hwfn);
+int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid);
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
- u8 remote_mac_addr[6];
- u8 local_mac_addr[6];
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params);
- void *shared_queue;
- dma_addr_t shared_queue_phys_addr;
-};
+int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params);
-#if IS_ENABLED(CONFIG_QED_RDMA)
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_roce_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code, union rdma_eqe_data *rdma_data);
-#else
-static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code,
- union rdma_eqe_data *rdma_data) {}
-#endif
#endif
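
The rewritten header keeps the IS_ENABLED(CONFIG_QED_RDMA) stub pattern: when the feature is compiled out, callers compile against empty inline stubs and need no #ifdef of their own. A self-contained sketch of the pattern, using a hypothetical CONFIG_FOO and foo_* names:

#include <stdio.h>

struct foo_dev { int id; };

#ifdef CONFIG_FOO
void foo_setup(struct foo_dev *dev);    /* real version, built elsewhere */
#else
static inline void foo_setup(struct foo_dev *dev)
{
        (void)dev;                      /* feature compiled out: no-op */
}
#endif

int main(void)
{
        struct foo_dev dev = { .id = 1 };

        foo_setup(&dev);                /* no #ifdef at the call site */
        printf("dev %d ready\n", dev.id);
        return 0;
}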
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 00dd50f8c42f..56c95fb9a26d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -174,6 +174,22 @@ struct qed_consq {
struct qed_chain chain;
};
+typedef int
+(*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ qed_spq_async_comp_cb cb);
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id);
+
struct qed_spq {
spinlock_t lock; /* SPQ lock */
@@ -203,6 +219,7 @@ struct qed_spq {
u32 comp_count;
u32 cid;
+ qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
};
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index dede73f41e61..be48d9abd001 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -54,7 +54,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
-#include "qed_roce.h"
+#include "qed_rdma.h"
/***************************************************************************
* Structures & Definitions
@@ -302,32 +302,16 @@ static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
- switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_QED_RDMA)
- case PROTOCOLID_ROCE:
- qed_roce_async_event(p_hwfn, p_eqe->opcode,
- &p_eqe->data.rdma_data);
- return 0;
-#endif
- case PROTOCOLID_COMMON:
- return qed_sriov_eqe_event(p_hwfn,
- p_eqe->opcode,
- p_eqe->echo, &p_eqe->data);
- case PROTOCOLID_ISCSI:
- if (!IS_ENABLED(CONFIG_QED_ISCSI))
- return -EINVAL;
+ qed_spq_async_comp_cb cb;
- if (p_hwfn->p_iscsi_info->event_cb) {
- struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+ if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ return -EINVAL;
- return p_iscsi->event_cb(p_iscsi->event_context,
- p_eqe->opcode, &p_eqe->data);
- } else {
- DP_NOTICE(p_hwfn,
- "iSCSI async completion is not set\n");
- return -EINVAL;
- }
- default:
+ cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+ if (cb) {
+ return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+ &p_eqe->data, p_eqe->fw_return_code);
+ } else {
DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
@@ -335,6 +319,28 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
}
}
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ qed_spq_async_comp_cb cb)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return -EINVAL;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+ return 0;
+}
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
+}
+
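
This registration pair replaces the hard-coded protocol switch that qed_async_event_completion() used to carry: each protocol installs its handler in a table indexed by protocol ID, and the EQ path simply looks the slot up. A standalone model of that dispatch scheme, simplified to userspace with hypothetical names:

#include <stdio.h>
#include <stddef.h>

enum proto { PROTO_COMMON, PROTO_ROCE, PROTO_MAX };

typedef int (*async_cb)(unsigned char opcode, void *data);

static async_cb cb_table[PROTO_MAX];

static int register_async_cb(enum proto id, async_cb cb)
{
        if (id >= PROTO_MAX)
                return -1;
        cb_table[id] = cb;
        return 0;
}

/* The dispatcher indexes the table instead of switching on the ID. */
static int dispatch(enum proto id, unsigned char opcode, void *data)
{
        if (id >= PROTO_MAX || !cb_table[id])
                return -1;              /* unknown async completion */
        return cb_table[id](opcode, data);
}

static int roce_event(unsigned char opcode, void *data)
{
        (void)data;
        printf("roce event, opcode %u\n", (unsigned int)opcode);
        return 0;
}

int main(void)
{
        register_async_cb(PROTO_ROCE, roce_event);
        return dispatch(PROTO_ROCE, 3, NULL);
}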
/***************************************************************************
* EQ API
***************************************************************************/
@@ -419,7 +425,7 @@ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
QED_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element),
- &p_eq->chain))
+ &p_eq->chain, NULL))
goto eq_allocate_fail;
/* register EQ completion on the SP SB */
@@ -547,7 +553,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
QED_CHAIN_CNT_TYPE_U16,
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
- &p_spq->chain))
+ &p_spq->chain, NULL))
goto spq_allocate_fail;
/* allocate and fill the SPQ elements (incl. ramrod data list) */
@@ -953,7 +959,7 @@ int qed_consq_alloc(struct qed_hwfn *p_hwfn)
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
QED_CHAIN_PAGE_SIZE / 0x80,
- 0x80, &p_consq->chain))
+ 0x80, &p_consq->chain, NULL))
goto consq_allocate_fail;
p_hwfn->p_consq = p_consq;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index e39ad22947cf..2cfd3bd9a031 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -44,6 +44,11 @@
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data, u8 fw_return_code);
+
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
@@ -565,6 +570,9 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn)
p_hwfn->pf_iov_info = p_sriov;
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ qed_sriov_eqe_event);
+
return qed_iov_allocate_vfdb(p_hwfn);
}
@@ -578,6 +586,8 @@ void qed_iov_setup(struct qed_hwfn *p_hwfn)
void qed_iov_free(struct qed_hwfn *p_hwfn)
{
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
qed_iov_free_vfdb(p_hwfn);
kfree(p_hwfn->pf_iov_info);
@@ -3833,8 +3843,10 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
}
}
-int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode, __le16 echo, union event_ring_data *data)
+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data, u8 fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 95f55ae2ee8b..c2e44bce398c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -344,17 +344,6 @@ void qed_iov_free(struct qed_hwfn *p_hwfn);
void qed_iov_free_hw_info(struct qed_dev *cdev);
/**
- * @brief qed_sriov_eqe_event - handle an async SR-IOV event arriving in an EQE.
- *
- * @param p_hwfn
- * @param opcode
- * @param echo
- * @param data
- */
-int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode, __le16 echo, union event_ring_data *data);
-
-/**
* @brief Mark structs of vfs that have been FLR-ed.
*
* @param p_hwfn
@@ -418,13 +407,6 @@ static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}
-static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo, union event_ring_data *data)
-{
- return -EINVAL;
-}
-
static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
u32 *disabled_vfs)
{
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index bc5f7c3b277d..75408fbb7680 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_QED_RDMA) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_rdma.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 694c09b8997e..4dfb238221f9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -40,6 +40,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
+#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
@@ -153,8 +154,8 @@ struct qede_vlan {
struct qede_rdma_dev {
struct qedr_dev *qedr_dev;
struct list_head entry;
- struct list_head roce_event_list;
- struct workqueue_struct *roce_wq;
+ struct list_head rdma_event_list;
+ struct workqueue_struct *rdma_wq;
};
struct qede_ptp;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index fdf04bc5406e..06ca13dd9ddb 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -60,7 +60,6 @@
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
-#include <linux/qed/qede_roce.h>
#include "qede.h"
#include "qede_ptp.h"
@@ -263,7 +262,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
break;
case NETDEV_CHANGEADDR:
edev = netdev_priv(ndev);
- qede_roce_event_changeaddr(edev);
+ qede_rdma_event_changeaddr(edev);
break;
}
@@ -978,7 +977,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_init_ndev(edev);
- rc = qede_roce_dev_add(edev);
+ rc = qede_rdma_dev_add(edev);
if (rc)
goto err3;
@@ -1014,7 +1013,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
return 0;
err4:
- qede_roce_dev_remove(edev);
+ qede_rdma_dev_remove(edev);
err3:
free_netdev(edev->ndev);
err2:
@@ -1065,7 +1064,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qede_ptp_disable(edev);
- qede_roce_dev_remove(edev);
+ qede_rdma_dev_remove(edev);
edev->ops->common->set_power_state(cdev, PCI_D0);
@@ -1317,8 +1316,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(struct eth_rx_bd),
- &rxq->rx_bd_ring);
-
+ &rxq->rx_bd_ring, NULL);
if (rc)
goto err;
@@ -1329,7 +1327,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(union eth_rx_cqe),
- &rxq->rx_comp_ring);
+ &rxq->rx_comp_ring, NULL);
if (rc)
goto err;
@@ -1387,7 +1385,8 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
txq->num_tx_buffers,
- sizeof(*p_virt), &txq->tx_pbl);
+ sizeof(*p_virt),
+ &txq->tx_pbl, NULL);
if (rc)
goto err;
@@ -1965,7 +1964,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
edev->state = QEDE_STATE_CLOSED;
- qede_roce_dev_event_close(edev);
+ qede_rdma_dev_event_close(edev);
/* Close OS Tx */
netif_tx_disable(edev->ndev);
@@ -2070,7 +2069,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
link_params.link_up = true;
edev->ops->common->set_link(edev->cdev, &link_params);
- qede_roce_dev_event_open(edev);
+ qede_rdma_dev_event_open(edev);
edev->state = QEDE_STATE_OPEN;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index c0030fb8d842..50b142fad6b8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_roce.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -33,19 +33,19 @@
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/qed/qede_roce.h>
+#include <linux/qed/qede_rdma.h>
#include "qede.h"
static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);
-bool qede_roce_supported(struct qede_dev *dev)
+bool qede_rdma_supported(struct qede_dev *dev)
{
return dev->dev_info.common.rdma_supported;
}
-static void _qede_roce_dev_add(struct qede_dev *edev)
+static void _qede_rdma_dev_add(struct qede_dev *edev)
{
if (!qedr_drv)
return;
@@ -54,11 +54,11 @@ static void _qede_roce_dev_add(struct qede_dev *edev)
edev->ndev);
}
-static int qede_roce_create_wq(struct qede_dev *edev)
+static int qede_rdma_create_wq(struct qede_dev *edev)
{
- INIT_LIST_HEAD(&edev->rdma_info.roce_event_list);
- edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq");
- if (!edev->rdma_info.roce_wq) {
+ INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
+ edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
+ if (!edev->rdma_info.rdma_wq) {
DP_NOTICE(edev, "qedr: Could not create workqueue\n");
return -ENOMEM;
}
@@ -66,14 +66,14 @@ static int qede_roce_create_wq(struct qede_dev *edev)
return 0;
}
-static void qede_roce_cleanup_event(struct qede_dev *edev)
+static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
- struct list_head *head = &edev->rdma_info.roce_event_list;
- struct qede_roce_event_work *event_node;
+ struct list_head *head = &edev->rdma_info.rdma_event_list;
+ struct qede_rdma_event_work *event_node;
- flush_workqueue(edev->rdma_info.roce_wq);
+ flush_workqueue(edev->rdma_info.rdma_wq);
while (!list_empty(head)) {
- event_node = list_entry(head->next, struct qede_roce_event_work,
+ event_node = list_entry(head->next, struct qede_rdma_event_work,
list);
cancel_work_sync(&event_node->work);
list_del(&event_node->list);
@@ -81,85 +81,85 @@ static void qede_roce_cleanup_event(struct qede_dev *edev)
}
}
-static void qede_roce_destroy_wq(struct qede_dev *edev)
+static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
- qede_roce_cleanup_event(edev);
- destroy_workqueue(edev->rdma_info.roce_wq);
+ qede_rdma_cleanup_event(edev);
+ destroy_workqueue(edev->rdma_info.rdma_wq);
}
-int qede_roce_dev_add(struct qede_dev *edev)
+int qede_rdma_dev_add(struct qede_dev *edev)
{
int rc = 0;
- if (qede_roce_supported(edev)) {
- rc = qede_roce_create_wq(edev);
+ if (qede_rdma_supported(edev)) {
+ rc = qede_rdma_create_wq(edev);
if (rc)
return rc;
INIT_LIST_HEAD(&edev->rdma_info.entry);
mutex_lock(&qedr_dev_list_lock);
list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
- _qede_roce_dev_add(edev);
+ _qede_rdma_dev_add(edev);
mutex_unlock(&qedr_dev_list_lock);
}
return rc;
}
-static void _qede_roce_dev_remove(struct qede_dev *edev)
+static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
qedr_drv->remove(edev->rdma_info.qedr_dev);
edev->rdma_info.qedr_dev = NULL;
}
-void qede_roce_dev_remove(struct qede_dev *edev)
+void qede_rdma_dev_remove(struct qede_dev *edev)
{
- if (!qede_roce_supported(edev))
+ if (!qede_rdma_supported(edev))
return;
- qede_roce_destroy_wq(edev);
+ qede_rdma_destroy_wq(edev);
mutex_lock(&qedr_dev_list_lock);
- _qede_roce_dev_remove(edev);
+ _qede_rdma_dev_remove(edev);
list_del(&edev->rdma_info.entry);
mutex_unlock(&qedr_dev_list_lock);
}
-static void _qede_roce_dev_open(struct qede_dev *edev)
+static void _qede_rdma_dev_open(struct qede_dev *edev)
{
if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}
-static void qede_roce_dev_open(struct qede_dev *edev)
+static void qede_rdma_dev_open(struct qede_dev *edev)
{
- if (!qede_roce_supported(edev))
+ if (!qede_rdma_supported(edev))
return;
mutex_lock(&qedr_dev_list_lock);
- _qede_roce_dev_open(edev);
+ _qede_rdma_dev_open(edev);
mutex_unlock(&qedr_dev_list_lock);
}
-static void _qede_roce_dev_close(struct qede_dev *edev)
+static void _qede_rdma_dev_close(struct qede_dev *edev)
{
if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}
-static void qede_roce_dev_close(struct qede_dev *edev)
+static void qede_rdma_dev_close(struct qede_dev *edev)
{
- if (!qede_roce_supported(edev))
+ if (!qede_rdma_supported(edev))
return;
mutex_lock(&qedr_dev_list_lock);
- _qede_roce_dev_close(edev);
+ _qede_rdma_dev_close(edev);
mutex_unlock(&qedr_dev_list_lock);
}
-static void qede_roce_dev_shutdown(struct qede_dev *edev)
+static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
- if (!qede_roce_supported(edev))
+ if (!qede_rdma_supported(edev))
return;
mutex_lock(&qedr_dev_list_lock);
@@ -168,7 +168,7 @@ static void qede_roce_dev_shutdown(struct qede_dev *edev)
mutex_unlock(&qedr_dev_list_lock);
}
-int qede_roce_register_driver(struct qedr_driver *drv)
+int qede_rdma_register_driver(struct qedr_driver *drv)
{
struct qede_dev *edev;
u8 qedr_counter = 0;
@@ -184,52 +184,52 @@ int qede_roce_register_driver(struct qedr_driver *drv)
struct net_device *ndev;
qedr_counter++;
- _qede_roce_dev_add(edev);
+ _qede_rdma_dev_add(edev);
ndev = edev->ndev;
if (netif_running(ndev) && netif_oper_up(ndev))
- _qede_roce_dev_open(edev);
+ _qede_rdma_dev_open(edev);
}
mutex_unlock(&qedr_dev_list_lock);
- pr_notice("qedr: discovered and registered %d RoCE funcs\n",
+ pr_notice("qedr: discovered and registered %d RDMA funcs\n",
qedr_counter);
return 0;
}
-EXPORT_SYMBOL(qede_roce_register_driver);
+EXPORT_SYMBOL(qede_rdma_register_driver);
-void qede_roce_unregister_driver(struct qedr_driver *drv)
+void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
struct qede_dev *edev;
mutex_lock(&qedr_dev_list_lock);
list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
if (edev->rdma_info.qedr_dev)
- _qede_roce_dev_remove(edev);
+ _qede_rdma_dev_remove(edev);
}
qedr_drv = NULL;
mutex_unlock(&qedr_dev_list_lock);
}
-EXPORT_SYMBOL(qede_roce_unregister_driver);
+EXPORT_SYMBOL(qede_rdma_unregister_driver);
-static void qede_roce_changeaddr(struct qede_dev *edev)
+static void qede_rdma_changeaddr(struct qede_dev *edev)
{
- if (!qede_roce_supported(edev))
+ if (!qede_rdma_supported(edev))
return;
if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}
-static struct qede_roce_event_work *
-qede_roce_get_free_event_node(struct qede_dev *edev)
+static struct qede_rdma_event_work *
+qede_rdma_get_free_event_node(struct qede_dev *edev)
{
- struct qede_roce_event_work *event_node = NULL;
+ struct qede_rdma_event_work *event_node = NULL;
struct list_head *list_node = NULL;
bool found = false;
- list_for_each(list_node, &edev->rdma_info.roce_event_list) {
- event_node = list_entry(list_node, struct qede_roce_event_work,
+ list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
+ event_node = list_entry(list_node, struct qede_rdma_event_work,
list);
if (!work_pending(&event_node->work)) {
found = true;
@@ -241,74 +241,74 @@ qede_roce_get_free_event_node(struct qede_dev *edev)
event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
if (!event_node) {
DP_NOTICE(edev,
- "qedr: Could not allocate memory for roce work\n");
+ "qedr: Could not allocate memory for rdma work\n");
return NULL;
}
list_add_tail(&event_node->list,
- &edev->rdma_info.roce_event_list);
+ &edev->rdma_info.rdma_event_list);
}
return event_node;
}
-static void qede_roce_handle_event(struct work_struct *work)
+static void qede_rdma_handle_event(struct work_struct *work)
{
- struct qede_roce_event_work *event_node;
- enum qede_roce_event event;
+ struct qede_rdma_event_work *event_node;
+ enum qede_rdma_event event;
struct qede_dev *edev;
- event_node = container_of(work, struct qede_roce_event_work, work);
+ event_node = container_of(work, struct qede_rdma_event_work, work);
event = event_node->event;
edev = event_node->ptr;
switch (event) {
case QEDE_UP:
- qede_roce_dev_open(edev);
+ qede_rdma_dev_open(edev);
break;
case QEDE_DOWN:
- qede_roce_dev_close(edev);
+ qede_rdma_dev_close(edev);
break;
case QEDE_CLOSE:
- qede_roce_dev_shutdown(edev);
+ qede_rdma_dev_shutdown(edev);
break;
case QEDE_CHANGE_ADDR:
- qede_roce_changeaddr(edev);
+ qede_rdma_changeaddr(edev);
break;
default:
- DP_NOTICE(edev, "Invalid roce event %d", event);
+ DP_NOTICE(edev, "Invalid rdma event %d", event);
}
}
-static void qede_roce_add_event(struct qede_dev *edev,
- enum qede_roce_event event)
+static void qede_rdma_add_event(struct qede_dev *edev,
+ enum qede_rdma_event event)
{
- struct qede_roce_event_work *event_node;
+ struct qede_rdma_event_work *event_node;
if (!edev->rdma_info.qedr_dev)
return;
- event_node = qede_roce_get_free_event_node(edev);
+ event_node = qede_rdma_get_free_event_node(edev);
if (!event_node)
return;
event_node->event = event;
event_node->ptr = edev;
- INIT_WORK(&event_node->work, qede_roce_handle_event);
- queue_work(edev->rdma_info.roce_wq, &event_node->work);
+ INIT_WORK(&event_node->work, qede_rdma_handle_event);
+ queue_work(edev->rdma_info.rdma_wq, &event_node->work);
}
-void qede_roce_dev_event_open(struct qede_dev *edev)
+void qede_rdma_dev_event_open(struct qede_dev *edev)
{
- qede_roce_add_event(edev, QEDE_UP);
+ qede_rdma_add_event(edev, QEDE_UP);
}
-void qede_roce_dev_event_close(struct qede_dev *edev)
+void qede_rdma_dev_event_close(struct qede_dev *edev)
{
- qede_roce_add_event(edev, QEDE_DOWN);
+ qede_rdma_add_event(edev, QEDE_DOWN);
}
-void qede_roce_event_changeaddr(struct qede_dev *edev)
+void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
- qede_roce_add_event(edev, QEDE_CHANGE_ADDR);
+ qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}
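
qede_rdma_get_free_event_node() above recycles completed work items from rdma_event_list before allocating fresh ones, so steady-state event delivery stays allocation-free. A simplified, single-threaded model of that reuse scheme; the real code checks work_pending() and relies on the caller's locking:

#include <stdio.h>
#include <stdlib.h>

struct event_node {
        struct event_node *next;
        int pending;
        int event;
};

static struct event_node *event_list;

static struct event_node *get_free_event_node(void)
{
        struct event_node *node;

        for (node = event_list; node; node = node->next)
                if (!node->pending)
                        return node;    /* recycle a completed node */

        node = calloc(1, sizeof(*node));
        if (!node)
                return NULL;
        node->next = event_list;        /* keep it for future reuse */
        event_list = node;
        return node;
}

int main(void)
{
        struct event_node *a = get_free_event_node();
        struct event_node *b;

        if (!a)
                return 1;
        a->pending = 1;                 /* queued */
        a->event = 42;
        a->pending = 0;                 /* work completed */

        b = get_free_event_node();      /* reuses 'a', no allocation */
        printf("reused: %s\n", a == b ? "yes" : "no");
        return 0;
}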
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 18c184ee1f3c..29ba37a08372 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -297,6 +297,14 @@ static const struct of_device_id emac_sgmii_dt_match[] = {
{}
};
+/* Dummy function for systems without an internal PHY. This avoids having
+ * to check for NULL pointers before calling the functions.
+ */
+static int emac_sgmii_dummy(struct emac_adapter *adpt)
+{
+ return 0;
+}
+
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
{
struct platform_device *sgmii_pdev = NULL;
@@ -311,8 +319,19 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
emac_sgmii_acpi_match);
if (!dev) {
- dev_err(&pdev->dev, "cannot find internal phy node\n");
- return -ENODEV;
+ dev_warn(&pdev->dev, "cannot find internal phy node\n");
+ /* There is typically no internal PHY on emulation
+ * systems, so if we can't find the node, assume
+ * we are on an emulation system and stub-out
+ * support for the internal PHY. These systems only
+ * use ACPI.
+ */
+ phy->open = emac_sgmii_dummy;
+ phy->close = emac_sgmii_dummy;
+ phy->link_up = emac_sgmii_dummy;
+ phy->link_down = emac_sgmii_dummy;
+
+ return 0;
}
sgmii_pdev = to_platform_device(dev);
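
Pointing every op at one shared dummy lets the rest of the emac driver call phy->open() and friends unconditionally instead of NULL-checking each call site. A compact sketch of the stub-ops idiom with invented types:

#include <stdio.h>

struct adapter { int up; };

struct phy_ops {
        int (*open)(struct adapter *a);
        int (*close)(struct adapter *a);
};

/* One shared no-op keeps every call site free of NULL checks. */
static int phy_dummy(struct adapter *a)
{
        (void)a;
        return 0;
}

static int phy_real_open(struct adapter *a)
{
        a->up = 1;
        return 0;
}

int main(void)
{
        struct phy_ops phy = { .open = phy_dummy, .close = phy_dummy };
        struct adapter a = { 0 };
        int have_phy = 0;               /* pretend probe found no PHY */

        if (have_phy)
                phy.open = phy_real_open;

        phy.open(&a);                   /* safe either way */
        phy.close(&a);
        printf("link up: %d\n", a.up);
        return 0;
}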
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 98a326faea29..746d94e28470 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -683,8 +683,6 @@ static int emac_probe(struct platform_device *pdev)
goto err_undo_mdiobus;
}
- emac_mac_reset(adpt);
-
/* set hw features */
netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
@@ -762,6 +760,19 @@ static int emac_remove(struct platform_device *pdev)
return 0;
}
+static void emac_shutdown(struct platform_device *pdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ struct emac_sgmii *sgmii = &adpt->phy;
+
+ /* Closing the SGMII turns off its interrupts */
+ sgmii->close(adpt);
+
+ /* Resetting the MAC turns off all DMA and its interrupts */
+ emac_mac_reset(adpt);
+}
+
static struct platform_driver emac_platform_driver = {
.probe = emac_probe,
.remove = emac_remove,
@@ -770,6 +781,7 @@ static struct platform_driver emac_platform_driver = {
.of_match_table = emac_dt_match,
.acpi_match_table = ACPI_PTR(emac_acpi_match),
},
+ .shutdown = emac_shutdown,
};
module_platform_driver(emac_platform_driver);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index bd0e3f157e9e..600e30e8f0be 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1409,8 +1409,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
*index = entry->index;
resolved = false;
} else if (removing) {
- ofdpa_neigh_del(found);
*index = found->index;
+ ofdpa_neigh_del(found);
} else if (updating) {
ofdpa_neigh_update(found, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
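
The rocker hunk is a textbook use-after-free fix: found->index has to be read before ofdpa_neigh_del() can free the entry. The general rule, copy out whatever you still need and only then release the object, in miniature:

#include <stdio.h>
#include <stdlib.h>

struct neigh { unsigned int index; };

static void neigh_del(struct neigh *n)
{
        free(n);                        /* 'n' is dead after this call */
}

int main(void)
{
        struct neigh *found = malloc(sizeof(*found));
        unsigned int index;

        if (!found)
                return 1;
        found->index = 7;

        /* Read first, free second; swapping these two lines is
         * exactly the use-after-free the patch above removes.
         */
        index = found->index;
        neigh_del(found);

        printf("index %u\n", index);
        return 0;
}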
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index ad9c4ded2b90..761c518b2f92 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4172,7 +4172,7 @@ found:
* recipients
*/
if (is_mc_recip) {
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
unsigned int depth, i;
memset(inbuf, 0, sizeof(inbuf));
@@ -4320,7 +4320,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
} else {
efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
- MC_CMD_FILTER_OP_IN_LEN,
+ MC_CMD_FILTER_OP_EXT_IN_LEN,
NULL, 0, rc);
}
}
@@ -4453,7 +4453,7 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
struct efx_filter_spec *spec)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
struct efx_filter_spec *saved_spec;
unsigned int hash, i, depth = 1;
bool replacing = false;
@@ -4940,7 +4940,7 @@ not_restored:
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
struct efx_filter_spec *spec;
unsigned int filter_idx;
int rc;
@@ -5105,6 +5105,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
+ EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -5122,11 +5123,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
}
return rc;
} else {
- /* mark as not inserted, and carry on */
- rc = EFX_EF10_FILTER_ID_INVALID;
+ /* keep invalid ID, and carry on */
}
+ } else {
+ ids[i] = efx_ef10_filter_get_unsafe_id(rc);
}
- ids[i] = efx_ef10_filter_get_unsafe_id(rc);
}
if (multicast && rollback) {
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index b7e4345c990d..019cef1d3cf7 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -661,8 +661,6 @@ restore_filters:
up_write(&vf->efx->filter_sem);
mutex_unlock(&vf->efx->mac_lock);
- up_write(&vf->efx->filter_sem);
-
rc2 = efx_net_open(vf->efx->net_dev);
if (rc2)
goto reset_nic;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0d230b125c6c..080428762858 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2485,7 +2485,7 @@ static int smc_drv_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops smc_drv_pm_ops = {
+static const struct dev_pm_ops smc_drv_pm_ops = {
.suspend = smc_drv_suspend,
.resume = smc_drv_resume,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 54f93ee53ef7..6c2d1da05588 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -81,6 +81,12 @@ static const struct emac_variant emac_variant_h3 = {
.support_rgmii = true
};
+static const struct emac_variant emac_variant_v3s = {
+ .default_syscon_value = 0x38000,
+ .internal_phy = PHY_INTERFACE_MODE_MII,
+ .support_mii = true
+};
+
static const struct emac_variant emac_variant_a83t = {
.default_syscon_value = 0,
.internal_phy = 0,
@@ -185,6 +191,7 @@ static const struct emac_variant emac_variant_a64 = {
/* H3 specific bits for EPHY */
#define H3_EPHY_ADDR_SHIFT 20
+#define H3_EPHY_CLK_SEL BIT(18) /* 1: 24MHz, 0: 25MHz */
#define H3_EPHY_LED_POL BIT(17) /* 1: active low, 0: active high */
#define H3_EPHY_SHUTDOWN BIT(16) /* 1: shutdown, 0: power up */
#define H3_EPHY_SELECT BIT(15) /* 1: internal PHY, 0: external PHY */
@@ -631,7 +638,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
{
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
struct device_node *node = priv->device->of_node;
- int ret;
+ int ret, phy_interface;
u32 reg, val;
regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val);
@@ -656,6 +663,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
else
reg &= ~H3_EPHY_LED_POL;
+ /* Force EPHY xtal frequency to 24MHz. */
+ reg |= H3_EPHY_CLK_SEL;
+
ret = of_mdio_parse_addr(priv->device,
priv->plat->phy_node);
if (ret < 0) {
@@ -708,7 +718,11 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
- switch (priv->plat->interface) {
+ phy_interface = priv->plat->interface;
+ /* if PHY is internal, select the mode (xMII) used by the SoC */
+ if (gmac->use_internal_phy)
+ phy_interface = gmac->variant->internal_phy;
+ switch (phy_interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -922,7 +936,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
}
plat_dat->interface = of_get_phy_mode(dev->of_node);
- if (plat_dat->interface == gmac->variant->internal_phy) {
+ if (plat_dat->interface == PHY_INTERFACE_MODE_INTERNAL) {
dev_info(&pdev->dev, "Will use internal PHY\n");
gmac->use_internal_phy = true;
gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0);
@@ -971,6 +985,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
static const struct of_device_id sun8i_dwmac_match[] = {
{ .compatible = "allwinner,sun8i-h3-emac",
.data = &emac_variant_h3 },
+ { .compatible = "allwinner,sun8i-v3s-emac",
+ .data = &emac_variant_v3s },
{ .compatible = "allwinner,sun8i-a83t-emac",
.data = &emac_variant_a83t },
{ .compatible = "allwinner,sun50i-a64-emac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 471a9aa6ac94..22cf6353ba04 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -205,8 +205,8 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
int i;
- for (i = 0; i < 22; i++)
- if ((i < 9) || (i > 17))
+ for (i = 0; i < 23; i++)
+ if ((i < 12) || (i > 17))
reg_space[DMA_BUS_MODE / 4 + i] =
readl(ioaddr + DMA_BUS_MODE + i * 4);
}
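
The dwmac1000 dump now covers 23 registers and skips the reserved window at indexes 12..17 instead of 9..17. The shape of such a dump loop, with faked register values:

#include <stdio.h>

#define NREGS 23

int main(void)
{
        unsigned int regs[NREGS], dump[NREGS] = { 0 };
        int i;

        for (i = 0; i < NREGS; i++)
                regs[i] = 0x1000 + i;   /* stand-in for readl() */

        /* Copy the window, leaving the reserved hole zeroed. */
        for (i = 0; i < NREGS; i++)
                if (i < 12 || i > 17)
                        dump[i] = regs[i];

        printf("dump[11]=0x%x dump[12]=0x%x\n", dump[11], dump[12]);
        return 0;
}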
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index eec8463057fd..e84831e1b63b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -71,9 +71,9 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
-void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_rx_phy, u32 chan)
+static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
{
u32 value;
u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
@@ -85,9 +85,9 @@ void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
}
-void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 chan)
+static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
{
u32 value;
u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
@@ -99,8 +99,8 @@ void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
-void dwmac4_dma_init_channel(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, u32 chan)
+static void dwmac4_dma_init_channel(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
u32 value;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 743170d57f62..babb39c646ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -29,7 +29,7 @@
#include "stmmac.h"
#include "dwmac_dma.h"
-#define REG_SPACE_SIZE 0x1054
+#define REG_SPACE_SIZE 0x1060
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6a1cb59728fe..19bba6281dab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2830,7 +2830,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[first_entry].buf = des;
tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
- tx_q->tx_skbuff[first_entry] = skb;
first->des0 = cpu_to_le32(des);
@@ -2864,6 +2863,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
+ /* Only the last descriptor gets to point to the skb. */
+ tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+
+ /* We've used all the descriptors we need for this skb, but we
+ * still advance cur_tx so that it references a fresh descriptor.
+ * ndo_start_xmit will fill that descriptor the next time it is
+ * called, and stmmac_tx_clean may clean up to this descriptor.
+ */
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
@@ -2965,7 +2972,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
}
@@ -2996,8 +3003,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc;
- tx_q->tx_skbuff[first_entry] = skb;
-
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
@@ -3045,8 +3050,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
skb->len);
}
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ /* Only the last descriptor gets to point to the skb. */
+ tx_q->tx_skbuff[entry] = skb;
+ /* We've used all the descriptors we need for this skb, but we
+ * still advance cur_tx so that it references a fresh descriptor.
+ * ndo_start_xmit will fill that descriptor the next time it is
+ * called, and stmmac_tx_clean may clean up to this descriptor.
+ */
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
@@ -4126,7 +4138,7 @@ int stmmac_dvr_probe(struct device *device,
NETIF_F_RXCSUM;
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
- ndev->hw_features |= NETIF_F_TSO;
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
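
Both transmit paths now record the skb only in the ring slot of its last descriptor, so the completion path frees it exactly once, and only after every descriptor of the frame is done. A small model of that ownership rule; ring size and names are invented:

#include <stdio.h>

#define RING_SIZE 8

/* One frame may span several descriptors, but only the slot of the
 * last descriptor owns the skb pointer.
 */
static const char *tx_skbuff[RING_SIZE];
static unsigned int cur_tx, dirty_tx;

static void xmit(const char *skb, unsigned int ndesc)
{
        unsigned int i, entry = cur_tx;

        for (i = 0; i < ndesc; i++) {
                tx_skbuff[entry] = (i == ndesc - 1) ? skb : NULL;
                entry = (entry + 1) % RING_SIZE;
        }
        cur_tx = entry;                 /* now points at a fresh slot */
}

static void tx_clean(void)
{
        while (dirty_tx != cur_tx) {
                if (tx_skbuff[dirty_tx])        /* free once, at the end */
                        printf("free %s\n", tx_skbuff[dirty_tx]);
                tx_skbuff[dirty_tx] = NULL;
                dirty_tx = (dirty_tx + 1) % RING_SIZE;
        }
}

int main(void)
{
        xmit("skb0", 3);                /* a 3-descriptor frame */
        xmit("skb1", 1);
        tx_clean();
        return 0;
}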
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 22f910795be4..8d375e51a526 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -30,42 +30,39 @@
* negative value of the address means that MAC controller is not connected
* with PHY.
*/
-struct stmmac_pci_dmi_data {
- const char *name;
- const char *asset_tag;
+struct stmmac_pci_func_data {
unsigned int func;
int phy_addr;
};
+struct stmmac_pci_dmi_data {
+ const struct stmmac_pci_func_data *func;
+ size_t nfuncs;
+};
+
struct stmmac_pci_info {
- struct pci_dev *pdev;
- int (*setup)(struct plat_stmmacenet_data *plat,
- struct stmmac_pci_info *info);
- struct stmmac_pci_dmi_data *dmi;
+ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
-static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
+static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
+ const struct dmi_system_id *dmi_list)
{
- const char *name = dmi_get_system_info(DMI_BOARD_NAME);
- const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG);
- unsigned int func = PCI_FUNC(info->pdev->devfn);
- struct stmmac_pci_dmi_data *dmi;
+ const struct stmmac_pci_func_data *func_data;
+ const struct stmmac_pci_dmi_data *dmi_data;
+ const struct dmi_system_id *dmi_id;
+ int func = PCI_FUNC(pdev->devfn);
+ size_t n;
- /*
- * Galileo boards with old firmware don't support DMI. We always return
- * 1 here, so at least the first found MAC controller would be probed.
- */
- if (!name)
- return 1;
-
- for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
- if (!strcmp(dmi->name, name) && dmi->func == func) {
- /* If asset tag is provided, match on it as well. */
- if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag))
- continue;
- return dmi->phy_addr;
- }
- }
+ dmi_id = dmi_first_match(dmi_list);
+ if (!dmi_id)
+ return -ENODEV;
+
+ dmi_data = dmi_id->driver_data;
+ func_data = dmi_data->func;
+
+ for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
+ if (func_data->func == func)
+ return func_data->phy_addr;
return -ENODEV;
}
@@ -100,7 +97,8 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->rx_queues_cfg[0].pkt_route = 0x0;
}
-static void stmmac_default_data(struct plat_stmmacenet_data *plat)
+static int stmmac_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
{
/* Set common default data first */
common_default_data(plat);
@@ -112,12 +110,77 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
/* TODO: AXI */
+
+ return 0;
}
-static int quark_default_data(struct plat_stmmacenet_data *plat,
- struct stmmac_pci_info *info)
+static const struct stmmac_pci_info stmmac_pci_info = {
+ .setup = stmmac_default_data,
+};
+
+static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
+ {
+ .func = 6,
+ .phy_addr = 1,
+ },
+};
+
+static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
+ .func = galileo_stmmac_func_data,
+ .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
+};
+
+static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
+ {
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {
+ .func = 7,
+ .phy_addr = 1,
+ },
+};
+
+static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
+ .func = iot2040_stmmac_func_data,
+ .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
+};
+
+static const struct dmi_system_id quark_pci_dmi[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+ "6ES7647-0AA00-0YA2"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+ "6ES7647-0AA00-1YA2"),
+ },
+ .driver_data = (void *)&iot2040_stmmac_dmi_data,
+ },
+ {}
+};
+
+static int quark_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
{
- struct pci_dev *pdev = info->pdev;
int ret;
/* Set common default data first */
@@ -127,9 +190,19 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
* Refuse to load the driver and register net device if MAC controller
* does not connect to any PHY interface.
*/
- ret = stmmac_pci_find_phy_addr(info);
- if (ret < 0)
- return ret;
+ ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
+ if (ret < 0) {
+ /* Return error to the caller on DMI enabled boards. */
+ if (dmi_get_system_info(DMI_BOARD_NAME))
+ return ret;
+
+ /*
+ * Galileo boards with old firmware don't support DMI. We always
+ * use 1 here as the PHY address, so at least the first found MAC
+ * controller would be probed.
+ */
+ ret = 1;
+ }
plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
plat->phy_addr = ret;
@@ -143,41 +216,8 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
return 0;
}
-static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
- {
- .name = "Galileo",
- .func = 6,
- .phy_addr = 1,
- },
- {
- .name = "GalileoGen2",
- .func = 6,
- .phy_addr = 1,
- },
- {
- .name = "SIMATIC IOT2000",
- .asset_tag = "6ES7647-0AA00-0YA2",
- .func = 6,
- .phy_addr = 1,
- },
- {
- .name = "SIMATIC IOT2000",
- .asset_tag = "6ES7647-0AA00-1YA2",
- .func = 6,
- .phy_addr = 1,
- },
- {
- .name = "SIMATIC IOT2000",
- .asset_tag = "6ES7647-0AA00-1YA2",
- .func = 7,
- .phy_addr = 1,
- },
- {}
-};
-
-static struct stmmac_pci_info quark_pci_info = {
+static const struct stmmac_pci_info quark_pci_info = {
.setup = quark_default_data,
- .dmi = quark_pci_dmi_data,
};
/**
@@ -236,15 +276,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (info) {
- info->pdev = pdev;
- if (info->setup) {
- ret = info->setup(plat, info);
- if (ret)
- return ret;
- }
- } else
- stmmac_default_data(plat);
+ ret = info->setup(pdev, plat);
+ if (ret)
+ return ret;
pci_enable_msi(pdev);
@@ -270,14 +304,21 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
-#define STMMAC_VENDOR_ID 0x700
+/* synthetic ID, no official vendor */
+#define PCI_VENDOR_ID_STMMAC 0x700
+
#define STMMAC_QUARK_ID 0x0937
#define STMMAC_DEVICE_ID 0x1108
+#define STMMAC_DEVICE(vendor_id, dev_id, info) { \
+ PCI_VDEVICE(vendor_id, dev_id), \
+ .driver_data = (kernel_ulong_t)&info \
+ }
+
static const struct pci_device_id stmmac_id_table[] = {
- {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
- {PCI_VDEVICE(INTEL, STMMAC_QUARK_ID), (kernel_ulong_t)&quark_pci_info},
+ STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info),
+ STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info),
{}
};
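
The Quark PHY-address quirks now live in a dmi_system_id table whose driver_data carries per-board function-to-address lists, so dmi_first_match() does the board matching and the driver only walks the small per-board list. A userspace approximation of that two-level lookup; the structure is simplified, and only the board names come from the table above:

#include <stdio.h>
#include <string.h>

struct func_data {
        unsigned int func;
        int phy_addr;
};

struct board_match {
        const char *board;              /* stand-in for DMI matching */
        const struct func_data *funcs;
        size_t nfuncs;
};

static const struct func_data galileo_funcs[] = {
        { .func = 6, .phy_addr = 1 },
};

static const struct board_match boards[] = {
        { "Galileo",     galileo_funcs, 1 },
        { "GalileoGen2", galileo_funcs, 1 },
};

static int find_phy_addr(const char *board, unsigned int func)
{
        size_t i, n;

        for (i = 0; i < sizeof(boards) / sizeof(boards[0]); i++) {
                if (strcmp(boards[i].board, board))
                        continue;
                for (n = 0; n < boards[i].nfuncs; n++)
                        if (boards[i].funcs[n].func == func)
                                return boards[i].funcs[n].phy_addr;
        }
        return -1;                      /* -ENODEV in the driver */
}

int main(void)
{
        printf("phy addr: %d\n", find_phy_addr("Galileo", 6));
        return 0;
}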
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3840529344ed..a366b3747eeb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -313,6 +313,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
{ .compatible = "snps,dwc-qos-ethernet-4.10" },
{ .compatible = "allwinner,sun8i-a83t-emac" },
{ .compatible = "allwinner,sun8i-h3-emac" },
+ { .compatible = "allwinner,sun8i-v3s-emac" },
{ .compatible = "allwinner,sun50i-a64-emac" },
};
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 1562ab4151e1..56ba411421f0 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -90,7 +90,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
if (of_device_is_compatible(dev->of_node, "ti,dm816-emac"))
return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr);
- if (of_machine_is_compatible("ti,am4372"))
+ if (of_machine_is_compatible("ti,am43"))
return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
if (of_machine_is_compatible("ti,dra7"))
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b7a0f5eeab62..1850e348f555 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1236,6 +1236,7 @@ static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
{
struct cpsw_common *cpsw = priv->cpsw;
+ skb_tx_timestamp(skb);
return cpdma_chan_submit(txch, skb, skb->data, skb->len,
priv->emac_port + cpsw->data.dual_emac);
}
@@ -1597,6 +1598,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
+ struct cpts *cpts = cpsw->cpts;
struct netdev_queue *txq;
struct cpdma_chan *txch;
int ret, q_idx;
@@ -1608,11 +1610,9 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- cpts_is_tx_enabled(cpsw->cpts))
+ cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- skb_tx_timestamp(skb);
-
q_idx = skb_get_queue_mapping(skb);
if (q_idx >= cpsw->tx_ch_num)
q_idx = q_idx % cpsw->tx_ch_num;
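Two ordering rules drive this move: skb_tx_timestamp() (the software fallback stamp) should run as late as possible, immediately before the descriptor is handed to DMA, and SKBTX_IN_PROGRESS must be set only when a hardware timestamp will actually be produced, because setting it suppresses the software stamp for that skb. A generic sketch of the resulting pattern; hw_will_stamp() and queue_to_hw() are hypothetical stand-ins, not cpsw API:

	static bool hw_will_stamp(struct sk_buff *skb);	/* hypothetical */
	static int queue_to_hw(struct sk_buff *skb);	/* hypothetical */

	static int submit(struct sk_buff *skb)
	{
		if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    hw_will_stamp(skb))	/* only promise HW stamps we deliver */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		skb_tx_timestamp(skb);	/* SW stamp, right before HW queueing */
		return queue_to_hw(skb);
	}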
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index c96eca2b1b46..01ea82ba9cdc 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -30,6 +30,7 @@
#include <linux/of.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
+#include <linux/ptp_classify.h>
#include <linux/timecounter.h>
struct cpsw_cpts {
@@ -155,6 +156,16 @@ static inline bool cpts_is_tx_enabled(struct cpts *cpts)
return !!cpts->tx_enable;
}
+static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ unsigned int class = ptp_classify_raw(skb);
+
+ if (class == PTP_CLASS_NONE)
+ return false;
+
+ return true;
+}
+
#else
struct cpts;
@@ -203,6 +214,11 @@ static inline bool cpts_is_tx_enabled(struct cpts *cpts)
{
return false;
}
+
+static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ return false;
+}
#endif
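cpts_can_timestamp() leans on ptp_classify_raw(), which runs a packet classifier over the frame and returns PTP_CLASS_NONE unless it is a recognisable PTP event message (L2 or UDP encapsulated). A hedged usage sketch; want_tx_stamp() is an illustrative helper, not driver API:

	#include <linux/ptp_classify.h>

	/* Illustrative: combine the enable check with the per-packet
	 * filter so only genuine PTP frames get timestamp bookkeeping.
	 */
	static bool want_tx_stamp(struct cpts *cpts, struct sk_buff *skb)
	{
		return cpts_is_tx_enabled(cpts) &&
		       cpts_can_timestamp(cpts, skb);
	}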
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 0847a8f48cfe..28cb38af1a34 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2503,24 +2503,8 @@ static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
const struct netcp_packet *p_info)
{
struct sk_buff *skb = p_info->skb;
- unsigned int class = ptp_classify_raw(skb);
- if (class == PTP_CLASS_NONE)
- return false;
-
- switch (class) {
- case PTP_CLASS_V1_IPV4:
- case PTP_CLASS_V1_IPV6:
- case PTP_CLASS_V2_IPV4:
- case PTP_CLASS_V2_IPV6:
- case PTP_CLASS_V2_L2:
- case (PTP_CLASS_V2_VLAN | PTP_CLASS_L2):
- case (PTP_CLASS_V2_VLAN | PTP_CLASS_IPV4):
- case (PTP_CLASS_V2_VLAN | PTP_CLASS_IPV6):
- return true;
- }
-
- return false;
+ return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
}
static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index d586ad93aaff..eb77201cb718 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1058,7 +1058,8 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
};
-static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
+static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
@@ -1181,7 +1182,8 @@ static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port)
}
static int geneve_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
bool use_udp6_rx_checksums = false;
struct ip_tunnel_info info;
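This signature change repeats across every rtnl_link_ops driver below (gtp, ifb, ipvlan, macsec, macvlan, nlmon, ppp, team, tun, veth, vrf, vxlan): the struct netlink_ext_ack argument lets a validate/newlink/changelink handler attach a human-readable message to the failure instead of returning a bare errno. A sketch of how a handler can use it; example_validate() is illustrative:

	/* Illustrative ->validate() using extended acks. */
	static int example_validate(struct nlattr *tb[], struct nlattr *data[],
				    struct netlink_ext_ack *extack)
	{
		if (tb[IFLA_ADDRESS] &&
		    nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack,
				       "invalid link layer address length");
			return -EINVAL;
		}
		return 0;
	}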
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 8e333a8a2295..1542e837fdfa 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -636,7 +636,8 @@ static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
static int gtp_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct gtp_dev *gtp;
struct gtp_net *gn;
@@ -697,7 +698,8 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
[IFLA_GTP_ROLE] = { .type = NLA_U32 },
};
-static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
+static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (!data)
return -EINVAL;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index b30a3c2f772b..d6c25580f8dd 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -146,7 +146,6 @@ struct hv_netvsc_packet {
struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
- bool link_state; /* 0 - link up, 1 - link down */
int ring_size;
u32 max_num_vrss_chns;
u32 num_chn;
@@ -165,7 +164,7 @@ struct rndis_device {
struct net_device *ndev;
enum rndis_device_state state;
- bool link_state;
+
atomic_t new_req_id;
spinlock_t request_lock;
@@ -173,6 +172,8 @@ struct rndis_device {
struct work_struct mcast_work;
+ bool link_state; /* 0 - link up, 1 - link down */
+
u8 hw_mac_adr[ETH_ALEN];
u8 rss_key[NETVSC_HASH_KEYLEN];
u16 ind_table[ITAB_NUM];
@@ -716,6 +717,8 @@ struct net_device_context {
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
+
+ bool datapath; /* 0 - synthetic, 1 - VF nic */
};
/* Per channel data */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 7c5ed8fe7a4f..0a9167dd72fb 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -57,6 +57,8 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
sizeof(struct nvsp_message),
(unsigned long)init_pkt,
VM_PKT_DATA_INBAND, 0);
+
+ net_device_ctx->datapath = vf;
}
static struct netvsc_device *alloc_net_device(void)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 9a6c5864bc04..63c98bbbc596 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -68,7 +68,8 @@ static void netvsc_set_multicast_list(struct net_device *net)
static int netvsc_open(struct net_device *net)
{
- struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ struct netvsc_device *nvdev = ndev_ctx->nvdev;
struct rndis_device *rdev;
int ret = 0;
@@ -84,7 +85,7 @@ static int netvsc_open(struct net_device *net)
netif_tx_wake_all_queues(net);
rdev = nvdev->extension;
- if (!rdev->link_state)
+ if (!rdev->link_state && !ndev_ctx->datapath)
netif_carrier_on(net);
return ret;
@@ -751,7 +752,7 @@ static int netvsc_set_channels(struct net_device *net,
channels->rx_count || channels->tx_count || channels->other_count)
return -EINVAL;
- if (count > net->num_tx_queues || count > net->num_rx_queues)
+ if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX)
return -EINVAL;
if (!nvdev || nvdev->destroy)
@@ -1178,7 +1179,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
rndis_dev = ndev->extension;
if (indir) {
for (i = 0; i < ITAB_NUM; i++)
- if (indir[i] >= dev->num_rx_queues)
+ if (indir[i] >= VRSS_CHANNEL_MAX)
return -EINVAL;
for (i = 0; i < ITAB_NUM; i++)
@@ -1284,7 +1285,8 @@ static void netvsc_link_change(struct work_struct *w)
case RNDIS_STATUS_MEDIA_CONNECT:
if (rdev->link_state) {
rdev->link_state = false;
- netif_carrier_on(net);
+ if (!ndev_ctx->datapath)
+ netif_carrier_on(net);
netif_tx_wake_all_queues(net);
} else {
notify = true;
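The two bound checks above switch from the netdev's current queue counts to VRSS_CHANNEL_MAX because the RSS indirection table and channel request address the device's fixed channel space, which may legitimately exceed what is presently allocated. Reduced to a generic helper (names illustrative):

	/* Illustrative: validate an indirection table against the
	 * device's hard maximum, not the current queue count.
	 */
	static int check_indir(const u32 *indir, unsigned int n, u32 max_chn)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			if (indir[i] >= max_chn)
				return -EINVAL;
		return 0;
	}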
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index cb79cd081f42..85c00e1c52b6 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1185,11 +1185,9 @@ int rndis_filter_device_add(struct hv_device *dev,
rndis_filter_query_device_link_status(rndis_device);
- device_info->link_state = rndis_device->link_state;
-
netdev_dbg(net, "Device MAC %pM link state %s\n",
rndis_device->hw_mac_adr,
- device_info->link_state ? "down" : "up");
+ rndis_device->link_state ? "down" : "up");
if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
return 0;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 144ea5ae8ab4..8870bd2a2e8a 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -273,7 +273,8 @@ static int ifb_open(struct net_device *dev)
return 0;
}
-static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
+static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 7919369c0a72..ba8173a0b62e 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -140,7 +140,8 @@ unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
unsigned int len, bool success, bool mcast);
int ipvlan_link_new(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[]);
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack);
void ipvlan_link_delete(struct net_device *dev, struct list_head *head);
void ipvlan_link_setup(struct net_device *dev);
int ipvlan_link_register(struct rtnl_link_ops *ops);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index dc888dd344eb..f37e3c1fd4e7 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -455,7 +455,8 @@ static const struct ethtool_ops ipvlan_ethtool_ops = {
};
static int ipvlan_nl_changelink(struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
@@ -476,7 +477,8 @@ static size_t ipvlan_nl_getsize(const struct net_device *dev)
);
}
-static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[])
+static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (data && data[IFLA_IPVLAN_MODE]) {
u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -508,7 +510,8 @@ err:
}
int ipvlan_link_new(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port;
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 2b713b63b62c..22f133ea8d7b 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -73,10 +73,9 @@ static void ipvtap_update_features(struct tap_dev *tap,
netdev_update_features(vlan->dev);
}
-static int ipvtap_newlink(struct net *src_net,
- struct net_device *dev,
- struct nlattr *tb[],
- struct nlattr *data[])
+static int ipvtap_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct ipvtap_dev *vlantap = netdev_priv(dev);
int err;
@@ -98,7 +97,7 @@ static int ipvtap_newlink(struct net *src_net,
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- err = ipvlan_link_new(src_net, dev, tb, data);
+ err = ipvlan_link_new(src_net, dev, tb, data, extack);
if (err) {
netdev_rx_handler_unregister(dev);
return err;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index e370d7c894cb..5e1ab1160856 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3056,7 +3056,8 @@ static void macsec_changelink_common(struct net_device *dev,
}
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[])
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (!data)
return 0;
@@ -3203,7 +3204,8 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
}
static int macsec_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev;
@@ -3285,7 +3287,8 @@ unregister:
return err;
}
-static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
+static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
u64 csid = MACSEC_DEFAULT_CIPHER_ID;
u8 icv_len = DEFAULT_ICV_LEN;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8ca274c6df3d..0f581ee74fe4 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,16 +39,20 @@
#define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS)
#define MACVLAN_BC_QUEUE_LEN 1000
+#define MACVLAN_F_PASSTHRU 1
+#define MACVLAN_F_ADDRCHANGE 2
+
struct macvlan_port {
struct net_device *dev;
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
struct list_head vlans;
struct sk_buff_head bc_queue;
struct work_struct bc_work;
- bool passthru;
+ u32 flags;
int count;
struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE];
DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
+ unsigned char perm_addr[ETH_ALEN];
};
struct macvlan_source_entry {
@@ -66,6 +70,31 @@ struct macvlan_skb_cb {
static void macvlan_port_destroy(struct net_device *dev);
+static inline bool macvlan_passthru(const struct macvlan_port *port)
+{
+ return port->flags & MACVLAN_F_PASSTHRU;
+}
+
+static inline void macvlan_set_passthru(struct macvlan_port *port)
+{
+ port->flags |= MACVLAN_F_PASSTHRU;
+}
+
+static inline bool macvlan_addr_change(const struct macvlan_port *port)
+{
+ return port->flags & MACVLAN_F_ADDRCHANGE;
+}
+
+static inline void macvlan_set_addr_change(struct macvlan_port *port)
+{
+ port->flags |= MACVLAN_F_ADDRCHANGE;
+}
+
+static inline void macvlan_clear_addr_change(struct macvlan_port *port)
+{
+ port->flags &= ~MACVLAN_F_ADDRCHANGE;
+}
+
/* Hash Ethernet address */
static u32 macvlan_eth_hash(const unsigned char *addr)
{
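Replacing the lone passthru bool with a flags word lets the new ADDRCHANGE state ride in the same u32, and the static inline helpers keep every call site as readable as the old boolean. The pattern in isolation; names are illustrative, BIT() comes from <linux/bits.h>:

	#define EX_F_PASSTHRU	BIT(0)
	#define EX_F_ADDRCHANGE	BIT(1)

	struct ex_port {
		u32 flags;	/* packs what used to be separate bools */
	};

	static inline bool ex_passthru(const struct ex_port *p)
	{
		return p->flags & EX_F_PASSTHRU;
	}

	static inline void ex_set_addr_change(struct ex_port *p)
	{
		p->flags |= EX_F_ADDRCHANGE;
	}

	static inline void ex_clear_addr_change(struct ex_port *p)
	{
		p->flags &= ~EX_F_ADDRCHANGE;
	}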
@@ -181,11 +210,12 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
static bool macvlan_addr_busy(const struct macvlan_port *port,
const unsigned char *addr)
{
- /* Test to see if the specified multicast address is
+ /* Test to see if the specified address is
* currently in use by the underlying device or
* another macvlan.
*/
- if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
+ if (!macvlan_passthru(port) && !macvlan_addr_change(port) &&
+ ether_addr_equal_64bits(port->dev->dev_addr, addr))
return true;
if (macvlan_hash_lookup(port, addr))
@@ -445,7 +475,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
}
macvlan_forward_source(skb, port, eth->h_source);
- if (port->passthru)
+ if (macvlan_passthru(port))
vlan = list_first_or_null_rcu(&port->vlans,
struct macvlan_dev, list);
else
@@ -574,7 +604,7 @@ static int macvlan_open(struct net_device *dev)
struct net_device *lowerdev = vlan->lowerdev;
int err;
- if (vlan->port->passthru) {
+ if (macvlan_passthru(vlan->port)) {
if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
err = dev_set_promiscuity(lowerdev, 1);
if (err < 0)
@@ -649,7 +679,7 @@ static int macvlan_stop(struct net_device *dev)
dev_uc_unsync(lowerdev, dev);
dev_mc_unsync(lowerdev, dev);
- if (vlan->port->passthru) {
+ if (macvlan_passthru(vlan->port)) {
if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
dev_set_promiscuity(lowerdev, -1);
goto hash_del;
@@ -672,6 +702,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
+ struct macvlan_port *port = vlan->port;
int err;
if (!(dev->flags & IFF_UP)) {
@@ -682,7 +713,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
if (macvlan_addr_busy(vlan->port, addr))
return -EBUSY;
- if (!vlan->port->passthru) {
+ if (!macvlan_passthru(port)) {
err = dev_uc_add(lowerdev, addr);
if (err)
return err;
@@ -692,6 +723,15 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
macvlan_hash_change_addr(vlan, addr);
}
+ if (macvlan_passthru(port) && !macvlan_addr_change(port)) {
+ /* Since addr_change isn't set, we are here due to lower
+ * device change. Save the lower-dev address so we can
+ * restore it later.
+ */
+ ether_addr_copy(vlan->port->perm_addr,
+ lowerdev->dev_addr);
+ }
+ macvlan_clear_addr_change(port);
return 0;
}
@@ -703,8 +743,14 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (vlan->mode == MACVLAN_MODE_PASSTHRU)
+ /* If the addresses are the same, this is a no-op */
+ if (ether_addr_equal(dev->dev_addr, addr->sa_data))
+ return 0;
+
+ if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
+ macvlan_set_addr_change(vlan->port);
return dev_set_mac_address(vlan->lowerdev, addr);
+ }
return macvlan_sync_address(dev, addr->sa_data);
}
@@ -926,7 +972,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* Support unicast filter only on passthru devices.
* Multicast filter should be allowed on all devices.
*/
- if (!vlan->port->passthru && is_unicast_ether_addr(addr))
+ if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
return -EOPNOTSUPP;
if (flags & NLM_F_REPLACE)
@@ -950,7 +996,7 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
/* Support unicast filter only on passthru devices.
* Multicast filter should be allowed on all devices.
*/
- if (!vlan->port->passthru && is_unicast_ether_addr(addr))
+ if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
return -EOPNOTSUPP;
if (is_unicast_ether_addr(addr))
@@ -1118,8 +1164,8 @@ static int macvlan_port_create(struct net_device *dev)
if (port == NULL)
return -ENOMEM;
- port->passthru = false;
port->dev = dev;
+ ether_addr_copy(port->perm_addr, dev->dev_addr);
INIT_LIST_HEAD(&port->vlans);
for (i = 0; i < MACVLAN_HASH_SIZE; i++)
INIT_HLIST_HEAD(&port->vlan_hash[i]);
@@ -1159,10 +1205,23 @@ static void macvlan_port_destroy(struct net_device *dev)
kfree_skb(skb);
}
+ /* If the lower device address has been changed by passthru
+ * macvlan, put it back.
+ */
+ if (macvlan_passthru(port) &&
+ !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) {
+ struct sockaddr sa;
+
+ sa.sa_family = port->dev->type;
+ memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len);
+ dev_set_mac_address(port->dev, &sa);
+ }
+
kfree(port);
}
-static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
+static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
@@ -1324,7 +1383,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
port = macvlan_port_get_rtnl(lowerdev);
/* Only 1 macvlan device can be created in passthru mode */
- if (port->passthru) {
+ if (macvlan_passthru(port)) {
/* The macvlan port was not created by this call;
* still goto destroy_macvlan_port for readability.
*/
@@ -1350,7 +1409,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
err = -EINVAL;
goto destroy_macvlan_port;
}
- port->passthru = true;
+ macvlan_set_passthru(port);
eth_hw_addr_inherit(dev, lowerdev);
}
@@ -1390,7 +1449,8 @@ destroy_macvlan_port:
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
static int macvlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
return macvlan_common_newlink(src_net, dev, tb, data);
}
@@ -1408,7 +1468,8 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
EXPORT_SYMBOL_GPL(macvlan_dellink);
static int macvlan_changelink(struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct macvlan_dev *vlan = netdev_priv(dev);
enum macvlan_mode mode;
@@ -1432,7 +1493,7 @@ static int macvlan_changelink(struct net_device *dev,
if (data && data[IFLA_MACVLAN_FLAGS]) {
__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
- if (vlan->port->passthru && promisc) {
+ if (macvlan_passthru(vlan->port) && promisc) {
int err;
if (flags & MACVLAN_FLAG_NOPROMISC)
@@ -1595,7 +1656,7 @@ static int macvlan_device_event(struct notifier_block *unused,
}
break;
case NETDEV_CHANGEADDR:
- if (!port->passthru)
+ if (!macvlan_passthru(port))
return NOTIFY_DONE;
vlan = list_first_entry_or_null(&port->vlans,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index da85057680d6..91e7b19bbf86 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -77,10 +77,9 @@ static void macvtap_update_features(struct tap_dev *tap,
netdev_update_features(vlan->dev);
}
-static int macvtap_newlink(struct net *src_net,
- struct net_device *dev,
- struct nlattr *tb[],
- struct nlattr *data[])
+static int macvtap_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct macvtap_dev *vlantap = netdev_priv(dev);
int err;
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index c4b3362da4a2..4b22955de191 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -127,7 +127,8 @@ static void nlmon_setup(struct net_device *dev)
dev->min_mtu = sizeof(struct nlmsghdr);
}
-static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
+static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS])
return -EINVAL;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index ed0d10f54f26..c3065236ffcc 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640,
if (overflow) {
pr_debug("tx timestamp queue overflow, count %d\n", overflow);
while (skb) {
- skb_complete_tx_timestamp(skb, NULL);
+ kfree_skb(skb);
skb = skb_dequeue(&dp83640->tx_queue);
}
return;
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 8d198a1f0031..09d215177fff 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -152,7 +152,6 @@ static int lxt973a2_read_status(struct phy_device *phydev)
int adv;
int err;
int lpa;
- int lpagb = 0;
/* Update the link, but return if there was an error */
err = lxt973a2_update_link(phydev);
@@ -178,18 +177,15 @@ static int lxt973a2_read_status(struct phy_device *phydev)
*/
} while (lpa == adv && retry--);
+ phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa);
+
lpa &= adv;
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
phydev->pause = phydev->asym_pause = 0;
- if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
- phydev->speed = SPEED_1000;
-
- if (lpagb & LPA_1000FULL)
- phydev->duplex = DUPLEX_FULL;
- } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+ if (lpa & (LPA_100FULL | LPA_100HALF)) {
phydev->speed = SPEED_100;
if (lpa & LPA_100FULL)
@@ -222,6 +218,7 @@ static int lxt973a2_read_status(struct phy_device *phydev)
phydev->speed = SPEED_10;
phydev->pause = phydev->asym_pause = 0;
+ phydev->lp_advertising = 0;
}
return 0;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9365b0792309..fdb43dd9b5cd 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -620,6 +620,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
if ((regval & 0xFF) == 0xFF) {
phy_init_hw(phydev);
phydev->link = 0;
+ if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+ phydev->drv->config_intr(phydev);
}
return 0;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 1b8204be064c..2306bfae057f 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -163,7 +163,7 @@ static void smsc_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(smsc_hw_stats); i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
+ strncpy(data + i * ETH_GSTRING_LEN,
smsc_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index d42091f11eb8..13028833bee3 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1061,7 +1061,8 @@ static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
[IFLA_PPP_DEV_FD] = { .type = NLA_S32 },
};
-static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[])
+static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (!data)
return -EINVAL;
@@ -1075,7 +1076,8 @@ static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[])
}
static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct ppp_config conf = {
.unit = -1,
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index ef08590db873..7868c29071d4 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -697,7 +697,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
goto err;
}
- p = skb_put_data(skb, buf, count);
+ skb_put_data(skb, buf, count);
/* strip address/control field if present */
p = skb->data;
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 2f22e318a67f..eac499c58aa7 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -506,7 +506,6 @@ static int pptp_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct pppox_sock *po;
- struct pptp_opt *opt;
int error = 0;
if (!sk)
@@ -520,7 +519,6 @@ static int pptp_release(struct socket *sock)
}
po = pppox_sk(sk);
- opt = &po->proto.pptp;
del_chan(po);
pppox_unbind_sock(sk);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 300bb1479b3a..e9f101c9bae2 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -201,7 +201,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
rionet_queue_tx_msg(skb, ndev,
nets[rnet->mport->id].active[i]);
if (count)
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
count++;
}
} else if (RIONET_MAC_MATCH(eth->h_dest)) {
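skb->users is now a refcount_t, so the extra per-destination reference must use the refcount API: unlike atomic_inc(), refcount_inc() saturates and warns on overflow or increment-from-zero rather than silently wrapping into a use-after-free. A minimal lifecycle illustration (not rionet code):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	static void refcount_demo(void *obj)
	{
		refcount_t users = REFCOUNT_INIT(1);	/* one initial owner */

		refcount_inc(&users);	/* second reference; warns instead of
					 * wrapping on overflow or on 0 */

		if (refcount_dec_and_test(&users))	/* 2 -> 1: false */
			kfree(obj);			/* not reached */
		if (refcount_dec_and_test(&users))	/* 1 -> 0: true */
			kfree(obj);	/* free on the final put */
	}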
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 629a412dc690..464570409796 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2101,7 +2101,8 @@ static void team_setup(struct net_device *dev)
}
static int team_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS] == NULL)
eth_hw_addr_random(dev);
@@ -2109,7 +2110,8 @@ static int team_newlink(struct net *src_net, struct net_device *dev,
return register_netdevice(dev);
}
-static int team_validate(struct nlattr *tb[], struct nlattr *data[])
+static int team_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ae49f4b99b67..3d4c24572ecd 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1580,7 +1580,8 @@ static void tun_setup(struct net_device *dev)
/* Trivial set of netlink ops to allow deleting a tun or tap
* device with netlink.
*/
-static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
+static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
return -EINVAL;
}
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 90facc5ecab0..7847436c441e 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -113,7 +113,6 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
while (offset + sizeof(u16) <= skb->len) {
u16 copy_length;
- unsigned char *data;
if (!rx->remaining) {
if (skb->len - offset == sizeof(u16)) {
@@ -167,8 +166,8 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
}
if (rx->ax_skb) {
- data = skb_put_data(rx->ax_skb, skb->data + offset,
- copy_length);
+ skb_put_data(rx->ax_skb, skb->data + offset,
+ copy_length);
if (!rx->remaining)
usbnet_skb_return(dev, rx->ax_skb);
}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 793ce900dffa..f32261ecd215 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1725,6 +1725,18 @@ static const struct driver_info lenovo_info = {
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info belkin_info = {
+ .description = "Belkin USB Ethernet Adapter",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct usb_device_id products[] = {
{
/* ASIX AX88179 10/100/1000 */
@@ -1754,6 +1766,10 @@ static const struct usb_device_id products[] = {
/* Lenovo OneLinkDock Gigabit LAN */
USB_DEVICE(0x17ef, 0x304b),
.driver_info = (unsigned long)&lenovo_info,
+}, {
+ /* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */
+ USB_DEVICE(0x050d, 0x0128),
+ .driver_info = (unsigned long)&belkin_info,
},
{ },
};
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index bcb974707118..2067743f51ca 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1069,7 +1069,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
/* push a new empty NDP */
if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
- ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
+ ndp16 = skb_put_zero(skb, ctx->max_ndp_size);
else
ndp16 = ctx->delayed_ndp16;
@@ -1120,7 +1120,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
goto exit_no_skb;
}
/* fill out the initial 16-bit NTB header */
- nth16 = (struct usb_cdc_ncm_nth16 *)memset(skb_put(skb_out, sizeof(struct usb_cdc_ncm_nth16)), 0, sizeof(struct usb_cdc_ncm_nth16));
+ nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
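skb_put_zero() (and the __skb_put_zero variant used in int51x1 below) is a thin convenience over skb_put(): it extends the tail by len bytes and zeroes them, so call sites lose the memset-and-cast chain. Roughly its shape, as an approximation rather than the exact upstream definition:

	/* Approximate shape of the helper (illustrative). */
	static inline void *my_skb_put_zero(struct sk_buff *skb,
					    unsigned int len)
	{
		void *tmp = skb_put(skb, len);	/* grow tail; returns old tail */

		memset(tmp, 0, len);
		return tmp;
	}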
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 908ada4ca21c..d7a3379ea668 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -861,7 +861,6 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
unsigned short temp_bytes;
unsigned short buffer_offset = 0;
unsigned short frame_len;
- unsigned char *tmp_rx_buf;
/* log if needed */
hso_dbg(0x1, "Rx %d bytes\n", count);
@@ -911,9 +910,9 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
/* Copy what we got so far. Make room for iphdr
* after tail. */
- tmp_rx_buf = skb_put_data(odev->skb_rx_buf,
- (char *)&(odev->rx_ip_hdr),
- sizeof(struct iphdr));
+ skb_put_data(odev->skb_rx_buf,
+ (char *)&(odev->rx_ip_hdr),
+ sizeof(struct iphdr));
/* ETH_HLEN */
odev->rx_buf_size = sizeof(struct iphdr);
@@ -932,9 +931,9 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
/* Copy the rest of the bytes that are left in the
* buffer into the waiting sk_buff. */
/* Make room for temp_bytes after tail. */
- tmp_rx_buf = skb_put_data(odev->skb_rx_buf,
- ip_pkt + buffer_offset,
- temp_bytes);
+ skb_put_data(odev->skb_rx_buf,
+ ip_pkt + buffer_offset,
+ temp_bytes);
odev->rx_buf_missing -= temp_bytes;
count -= temp_bytes;
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index be63a829b8fe..ae2b2563460b 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -110,7 +110,7 @@ static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
*len = cpu_to_le16(pack_len);
if (need_tail)
- memset(__skb_put(skb, need_tail), 0, need_tail);
+ __skb_put_zero(skb, need_tail);
return skb;
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 8bc4573e0cd4..6cfffeff6108 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -569,7 +569,6 @@ enum rtl_register_content {
#define RTL8152_MAX_TX 4
#define RTL8152_MAX_RX 10
#define INTBUFSIZE 2
-#define CRC_SIZE 4
#define TX_ALIGN 4
#define RX_ALIGN 8
@@ -588,12 +587,13 @@ enum rtl_register_content {
#define BYTE_EN_END_MASK 0xf0
#define RTL8153_MAX_PACKET 9216 /* 9K */
-#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - VLAN_HLEN)
-#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
+#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \
+ ETH_FCS_LEN)
+#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
#define RTL8153_RMS RTL8153_MAX_PACKET
#define RTL8152_TX_TIMEOUT (5 * HZ)
#define RTL8152_NAPI_WEIGHT 64
-#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + CRC_SIZE + \
+#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + ETH_FCS_LEN + \
sizeof(struct rx_desc) + RX_ALIGN)
/* rtl8152 flags */
@@ -770,7 +770,7 @@ static const int multicast_filter_limit = 32;
static unsigned int agg_buf_sz = 16384;
#define RTL_LIMITED_TSO_SIZE (agg_buf_sz - sizeof(struct tx_desc) - \
- VLAN_ETH_HLEN - VLAN_HLEN)
+ VLAN_ETH_HLEN - ETH_FCS_LEN)
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
@@ -1928,7 +1928,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
if (urb->actual_length < len_used)
break;
- pkt_len -= CRC_SIZE;
+ pkt_len -= ETH_FCS_LEN;
rx_data += sizeof(struct rx_desc);
skb = napi_alloc_skb(napi, pkt_len);
@@ -1952,7 +1952,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
}
find_next_rx:
- rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
+ rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN);
rx_desc = (struct rx_desc *)rx_data;
len_used = (int)(rx_data - (u8 *)agg->head);
len_used += sizeof(struct rx_desc);
@@ -2242,7 +2242,7 @@ static void set_tx_qlen(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
- tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN +
+ tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN +
sizeof(struct tx_desc));
}
@@ -3439,7 +3439,7 @@ static void r8153_first_init(struct r8152 *tp)
rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
- ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+ ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
@@ -3489,7 +3489,7 @@ static void r8153_enter_oob(struct r8152 *tp)
usleep_range(1000, 2000);
}
- ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+ ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
switch (tp->version) {
@@ -4957,7 +4957,7 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
if (netif_running(dev)) {
- u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE;
+ u32 rms = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms);
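Every VLAN_HLEN and CRC_SIZE replaced in this file was really accounting for the 4-byte Ethernet FCS; the constants only agreed by accident, which hid the misnaming. The computed sizes are unchanged, e.g.:

	RTL8153_MAX_MTU = RTL8153_MAX_PACKET - VLAN_ETH_HLEN - ETH_FCS_LEN
	                = 9216 - 18 - 4
	                = 9194
	RTL8152_RMS     = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN
	                = 1518 + 4
	                = 1522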
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 0156fe8cac17..f5438d0978ca 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -329,7 +329,8 @@ static void veth_setup(struct net_device *dev)
* netlink interface
*/
-static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
+static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
@@ -347,7 +348,8 @@ static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
static struct rtnl_link_ops veth_link_ops;
static int veth_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
int err;
struct net_device *peer;
@@ -373,7 +375,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
- err = veth_validate(peer_tb, NULL);
+ err = veth_validate(peer_tb, NULL, extack);
if (err < 0)
return err;
@@ -383,7 +385,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
tbp = tb;
}
- if (tbp[IFLA_IFNAME]) {
+ if (ifmp && tbp[IFLA_IFNAME]) {
nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
@@ -402,7 +404,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
return PTR_ERR(peer);
}
- if (tbp[IFLA_ADDRESS] == NULL)
+ if (!ifmp || !tbp[IFLA_ADDRESS])
eth_hw_addr_random(peer);
if (ifmp && (dev->ifindex != 0))
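The two ifmp guards above close an aliasing hole: when no peer attribute block was supplied, ifmp is NULL and tbp points back at tb, so honouring tbp[IFLA_IFNAME] or tbp[IFLA_ADDRESS] there would silently reuse the primary device's name or MAC for the peer. The pattern, reduced to its core (variables as in the hunks above):

	/* Only trust peer attributes that arrived in a real peer block;
	 * otherwise fall back to kernel-chosen defaults.
	 */
	if (ifmp && tbp[IFLA_IFNAME])
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); /* kernel numbers it */

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);	/* random MAC for the peer */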
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5c6388fb7dd1..2e69bcdc5b07 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1802,6 +1802,7 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
flush_work(&vi->config_work);
netif_device_detach(vi->dev);
+ netif_tx_disable(vi->dev);
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev)) {
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 997ef25189fd..f4d0054981c6 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1372,7 +1372,8 @@ static void vrf_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
}
-static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
+static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
@@ -1389,7 +1390,8 @@ static void vrf_dellink(struct net_device *dev, struct list_head *head)
}
static int vrf_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct net_vrf *vrf = netdev_priv(dev);
bool *add_fib_rules;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 94ce98229828..fd0ff97e3d81 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -226,7 +226,8 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
return NULL;
}
-static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
+static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
+ __be32 vni)
{
struct vxlan_dev *vxlan;
@@ -235,17 +236,27 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
vni = 0;
hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
- if (vxlan->default_dst.remote_vni == vni)
- return vxlan;
+ if (vxlan->default_dst.remote_vni != vni)
+ continue;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ const struct vxlan_config *cfg = &vxlan->cfg;
+
+ if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
+ cfg->remote_ifindex != ifindex)
+ continue;
+ }
+
+ return vxlan;
}
return NULL;
}
/* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
- sa_family_t family, __be16 port,
- u32 flags)
+static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
+ __be32 vni, sa_family_t family,
+ __be16 port, u32 flags)
{
struct vxlan_sock *vs;
@@ -253,7 +264,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
if (!vs)
return NULL;
- return vxlan_vs_find_vni(vs, vni);
+ return vxlan_vs_find_vni(vs, ifindex, vni);
}
/* Fill in neighbour message in skbuff. */
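The extra ifindex parameter exists because an IPv6 link-local address only identifies a peer together with the link it lives on, so once VXLAN_F_IPV6_LINKLOCAL is set the VNI lookup must be scoped per interface; two vxlan devices may then share a VNI on different links. The disambiguating test from the loop above, isolated (fields as in the hunk):

	/* Same VNI on two different links is two different tunnels
	 * when the underlay address is link-local.
	 */
	if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
	    cfg->remote_ifindex != ifindex)
		continue;	/* right VNI, wrong link: keep searching */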
@@ -305,7 +316,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
goto nla_put_failure;
- if ((vxlan->flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
+ if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
nla_put_u32(skb, NDA_SRC_VNI,
be32_to_cpu(fdb->vni)))
goto nla_put_failure;
@@ -419,7 +430,7 @@ static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
const u8 *mac, __be32 vni)
{
- if (vxlan->flags & VXLAN_F_COLLECT_METADATA)
+ if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
else
return &vxlan->fdb_head[eth_hash(mac)];
@@ -434,7 +445,7 @@ static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
hlist_for_each_entry_rcu(f, head, hlist) {
if (ether_addr_equal(mac, f->eth_addr)) {
- if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+ if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
if (vni == f->vni)
return f;
} else {
@@ -957,16 +968,24 @@ out:
*/
static bool vxlan_snoop(struct net_device *dev,
union vxlan_addr *src_ip, const u8 *src_mac,
- __be32 vni)
+ u32 src_ifindex, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
+ u32 ifindex = 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (src_ip->sa.sa_family == AF_INET6 &&
+ (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
+ ifindex = src_ifindex;
+#endif
f = vxlan_find_mac(vxlan, src_mac, vni);
if (likely(f)) {
struct vxlan_rdst *rdst = first_remote_rcu(f);
- if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
+ if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
+ rdst->remote_ifindex == ifindex))
return false;
/* Don't migrate static entries, drop packets */
@@ -993,7 +1012,7 @@ static bool vxlan_snoop(struct net_device *dev,
vxlan->cfg.dst_port,
vni,
vxlan->default_dst.remote_vni,
- 0, NTF_SELF);
+ ifindex, NTF_SELF);
spin_unlock(&vxlan->hash_lock);
}
@@ -1264,6 +1283,7 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
struct sk_buff *skb, __be32 vni)
{
union vxlan_addr saddr;
+ u32 ifindex = skb->dev->ifindex;
skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, vxlan->dev);
@@ -1284,8 +1304,8 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
#endif
}
- if ((vxlan->flags & VXLAN_F_LEARN) &&
- vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, vni))
+ if ((vxlan->cfg.flags & VXLAN_F_LEARN) &&
+ vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
return false;
return true;
@@ -1351,7 +1371,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
- vxlan = vxlan_vs_find_vni(vs, vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
if (!vxlan)
goto drop;
@@ -1507,7 +1527,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
if (netif_rx_ni(reply) == NET_RX_DROP)
dev->stats.rx_dropped++;
- } else if (vxlan->flags & VXLAN_F_L3MISS) {
+ } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
.sin.sin_family = AF_INET,
@@ -1665,7 +1685,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
if (netif_rx_ni(reply) == NET_RX_DROP)
dev->stats.rx_dropped++;
- } else if (vxlan->flags & VXLAN_F_L3MISS) {
+ } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
.sin6.sin6_family = AF_INET6,
@@ -1698,7 +1718,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
return false;
pip = ip_hdr(skb);
n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
- if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+ if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = pip->daddr,
.sin.sin_family = AF_INET,
@@ -1719,7 +1739,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
return false;
pip6 = ipv6_hdr(skb);
n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
- if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+ if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin6.sin6_addr = pip6->daddr,
.sin6.sin6_family = AF_INET6,
@@ -1993,8 +2013,9 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
#endif
}
- if (dst_vxlan->flags & VXLAN_F_LEARN)
- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, vni);
+ if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
+ vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
+ vni);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->tx_packets++;
@@ -2012,8 +2033,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
}
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
- struct vxlan_dev *vxlan, union vxlan_addr *daddr,
- __be16 dst_port, __be32 vni, struct dst_entry *dst,
+ struct vxlan_dev *vxlan,
+ union vxlan_addr *daddr,
+ __be16 dst_port, int dst_ifindex, __be32 vni,
+ struct dst_entry *dst,
u32 rt_flags)
{
#if IS_ENABLED(CONFIG_IPV6)
@@ -2029,9 +2052,9 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
dst_release(dst);
- dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+ dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni,
daddr->sa.sa_family, dst_port,
- vxlan->flags);
+ vxlan->cfg.flags);
if (!dst_vxlan) {
dev->stats.tx_errors++;
kfree_skb(skb);
@@ -2061,8 +2084,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct dst_entry *ndst = NULL;
__be32 vni, label;
__u8 tos, ttl;
+ int ifindex;
int err;
- u32 flags = vxlan->flags;
+ u32 flags = vxlan->cfg.flags;
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
@@ -2081,6 +2105,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
vni = (rdst->remote_vni) ? : default_vni;
+ ifindex = rdst->remote_ifindex;
local_ip = vxlan->cfg.saddr;
dst_cache = &rdst->dst_cache;
md->gbp = skb->mark;
@@ -2114,6 +2139,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst = &remote_ip;
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
vni = tunnel_id_to_key32(info->key.tun_id);
+ ifindex = 0;
dst_cache = &info->dst_cache;
if (info->options_len)
md = ip_tunnel_info_opts(info);
@@ -2131,8 +2157,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct rtable *rt;
__be16 df = 0;
- rt = vxlan_get_route(vxlan, dev, sock4, skb,
- rdst ? rdst->remote_ifindex : 0, tos,
+ rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
dst->sin.sin_addr.s_addr,
&local_ip.sin.sin_addr.s_addr,
dst_port, src_port,
@@ -2145,8 +2170,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
/* Bypass encapsulation if the destination is local */
if (!info) {
err = encap_bypass_if_local(skb, dev, vxlan, dst,
- dst_port, vni, &rt->dst,
- rt->rt_flags);
+ dst_port, ifindex, vni,
+ &rt->dst, rt->rt_flags);
if (err)
goto out_unlock;
} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
@@ -2168,8 +2193,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
} else {
struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
- ndst = vxlan6_get_route(vxlan, dev, sock6, skb,
- rdst ? rdst->remote_ifindex : 0, tos,
+ ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
label, &dst->sin6.sin6_addr,
&local_ip.sin6.sin6_addr,
dst_port, src_port,
@@ -2184,8 +2208,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
err = encap_bypass_if_local(skb, dev, vxlan, dst,
- dst_port, vni, ndst,
- rt6i_flags);
+ dst_port, ifindex, vni,
+ ndst, rt6i_flags);
if (err)
goto out_unlock;
}
@@ -2244,7 +2268,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
skb_reset_mac_header(skb);
- if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+ if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
info->mode & IP_TUNNEL_INFO_TX) {
vni = tunnel_id_to_key32(info->key.tun_id);
@@ -2257,7 +2281,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (vxlan->flags & VXLAN_F_PROXY) {
+ if (vxlan->cfg.flags & VXLAN_F_PROXY) {
eth = eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_ARP)
return arp_reduce(dev, skb, vni);
@@ -2277,7 +2301,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
f = vxlan_find_mac(vxlan, eth->h_dest, vni);
did_rsc = false;
- if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
+ if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
(ntohs(eth->h_proto) == ETH_P_IP ||
ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
@@ -2288,7 +2312,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
if (f == NULL) {
f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f == NULL) {
- if ((vxlan->flags & VXLAN_F_L2MISS) &&
+ if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
@@ -2484,10 +2508,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
dst->remote_ifindex);
- bool use_ipv6 = false;
-
- if (dst->remote_ip.sa.sa_family == AF_INET6)
- use_ipv6 = true;
+ bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
/* This check is different than dev->max_mtu, because it looks at
* the lowerdev->mtu, rather than the static dev->max_mtu
@@ -2623,6 +2644,10 @@ static void vxlan_setup(struct net_device *dev)
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
+ /* MTU range: 68 - 65535 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
+
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
@@ -2630,9 +2655,8 @@ static void vxlan_setup(struct net_device *dev)
vxlan->age_timer.function = vxlan_cleanup;
vxlan->age_timer.data = (unsigned long) vxlan;
- vxlan->cfg.dst_port = htons(vxlan_port);
-
vxlan->dev = dev;
+ vxlan->net = dev_net(dev);
gro_cells_init(&vxlan->gro_cells, dev);
@@ -2687,7 +2711,8 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
};
-static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
+static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
@@ -2701,11 +2726,19 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
}
}
+ if (tb[IFLA_MTU]) {
+ u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+
+ if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU)
+ return -EINVAL;
+ }
+
if (!data)
return -EINVAL;
if (data[IFLA_VXLAN_ID]) {
- __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+ u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+
if (id >= VXLAN_N_VID)
return -ERANGE;
}
@@ -2821,7 +2854,7 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
if (!vxlan->cfg.no_share) {
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
- vxlan->cfg.dst_port, vxlan->flags);
+ vxlan->cfg.dst_port, vxlan->cfg.flags);
if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
spin_unlock(&vn->sock_lock);
return -EBUSY;
@@ -2830,7 +2863,7 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
}
if (!vs)
vs = vxlan_socket_create(vxlan->net, ipv6,
- vxlan->cfg.dst_port, vxlan->flags);
+ vxlan->cfg.dst_port, vxlan->cfg.flags);
if (IS_ERR(vs))
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
@@ -2845,8 +2878,8 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
- bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
- bool ipv6 = vxlan->flags & VXLAN_F_IPV6 || metadata;
+ bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
+ bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata;
bool ipv4 = !ipv6 || metadata;
int ret = 0;
@@ -2866,116 +2899,172 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
return ret;
}
-static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
- struct vxlan_config *conf,
- bool changelink)
+static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
+ struct net_device **lower,
+ struct vxlan_dev *old)
{
struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
- struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
- struct vxlan_rdst *dst = &vxlan->default_dst;
- unsigned short needed_headroom = ETH_HLEN;
+ struct vxlan_dev *tmp;
bool use_ipv6 = false;
- __be16 default_port = vxlan->cfg.dst_port;
- struct net_device *lowerdev = NULL;
- if (!changelink) {
- if (conf->flags & VXLAN_F_GPE) {
- /* For now, allow GPE only together with
- * COLLECT_METADATA. This can be relaxed later; in such
- * case, the other side of the PtP link will have to be
- * provided.
- */
- if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
- !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
- pr_info("unsupported combination of extensions\n");
- return -EINVAL;
- }
- vxlan_raw_setup(dev);
- } else {
- vxlan_ether_setup(dev);
+ if (conf->flags & VXLAN_F_GPE) {
+ /* For now, allow GPE only together with
+ * COLLECT_METADATA. This can be relaxed later; in such
+ * case, the other side of the PtP link will have to be
+ * provided.
+ */
+ if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
+ !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+ return -EINVAL;
}
-
- /* MTU range: 68 - 65535 */
- dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = ETH_MAX_MTU;
- vxlan->net = src_net;
}
- dst->remote_vni = conf->vni;
+ if (!conf->remote_ip.sa.sa_family && !conf->saddr.sa.sa_family) {
+ /* Unless IPv6 is explicitly requested, assume IPv4 */
+ conf->remote_ip.sa.sa_family = AF_INET;
+ conf->saddr.sa.sa_family = AF_INET;
+ } else if (!conf->remote_ip.sa.sa_family) {
+ conf->remote_ip.sa.sa_family = conf->saddr.sa.sa_family;
+ } else if (!conf->saddr.sa.sa_family) {
+ conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family;
+ }
- memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
+ if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family)
+ return -EINVAL;
- /* Unless IPv6 is explicitly requested, assume IPv4 */
- if (!dst->remote_ip.sa.sa_family)
- dst->remote_ip.sa.sa_family = AF_INET;
+ if (vxlan_addr_multicast(&conf->saddr))
+ return -EINVAL;
- if (dst->remote_ip.sa.sa_family == AF_INET6 ||
- vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
+ if (conf->saddr.sa.sa_family == AF_INET6) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
use_ipv6 = true;
- vxlan->flags |= VXLAN_F_IPV6;
+ conf->flags |= VXLAN_F_IPV6;
+
+ if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+ int local_type =
+ ipv6_addr_type(&conf->saddr.sin6.sin6_addr);
+ int remote_type =
+ ipv6_addr_type(&conf->remote_ip.sin6.sin6_addr);
+
+ if (local_type & IPV6_ADDR_LINKLOCAL) {
+ if (!(remote_type & IPV6_ADDR_LINKLOCAL) &&
+ (remote_type != IPV6_ADDR_ANY))
+ return -EINVAL;
+
+ conf->flags |= VXLAN_F_IPV6_LINKLOCAL;
+ } else {
+ if (remote_type ==
+ (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL))
+ return -EINVAL;
+
+ conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL;
+ }
+ }
}
- if (conf->label && !use_ipv6) {
- pr_info("label only supported in use with IPv6\n");
+ if (conf->label && !use_ipv6)
return -EINVAL;
- }
- if (conf->remote_ifindex &&
- conf->remote_ifindex != vxlan->cfg.remote_ifindex) {
- lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
- dst->remote_ifindex = conf->remote_ifindex;
+ if (conf->remote_ifindex) {
+ struct net_device *lowerdev;
- if (!lowerdev) {
- pr_info("ifindex %d does not exist\n",
- dst->remote_ifindex);
+ lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
+ if (!lowerdev)
return -ENODEV;
- }
#if IS_ENABLED(CONFIG_IPV6)
if (use_ipv6) {
struct inet6_dev *idev = __in6_dev_get(lowerdev);
- if (idev && idev->cnf.disable_ipv6) {
- pr_info("IPv6 is disabled via sysctl\n");
+ if (idev && idev->cnf.disable_ipv6)
return -EPERM;
- }
}
#endif
- if (!conf->mtu)
- dev->mtu = lowerdev->mtu -
- (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
+ *lower = lowerdev;
+ } else {
+ if (vxlan_addr_multicast(&conf->remote_ip))
+ return -EINVAL;
- needed_headroom = lowerdev->hard_header_len;
- } else if (!conf->remote_ifindex &&
- vxlan_addr_multicast(&dst->remote_ip)) {
- pr_info("multicast destination requires interface to be specified\n");
- return -EINVAL;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (conf->flags & VXLAN_F_IPV6_LINKLOCAL)
+ return -EINVAL;
+#endif
+
+ *lower = NULL;
}
- if (lowerdev) {
- dev->gso_max_size = lowerdev->gso_max_size;
- dev->gso_max_segs = lowerdev->gso_max_segs;
+ if (!conf->dst_port) {
+ if (conf->flags & VXLAN_F_GPE)
+ conf->dst_port = htons(4790); /* IANA VXLAN-GPE port */
+ else
+ conf->dst_port = htons(vxlan_port);
}
- if (conf->mtu) {
- int max_mtu = ETH_MAX_MTU;
+ if (!conf->age_interval)
+ conf->age_interval = FDB_AGE_DEFAULT;
- if (lowerdev)
- max_mtu = lowerdev->mtu;
+ list_for_each_entry(tmp, &vn->vxlan_list, next) {
+ if (tmp == old)
+ continue;
- max_mtu -= (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
+ if (tmp->cfg.vni != conf->vni)
+ continue;
+ if (tmp->cfg.dst_port != conf->dst_port)
+ continue;
+ if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
+ (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
+ continue;
- if (conf->mtu < dev->min_mtu || conf->mtu > dev->max_mtu)
- return -EINVAL;
+ if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
+ tmp->cfg.remote_ifindex != conf->remote_ifindex)
+ continue;
+
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+static void vxlan_config_apply(struct net_device *dev,
+ struct vxlan_config *conf,
+ struct net_device *lowerdev, bool changelink)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ unsigned short needed_headroom = ETH_HLEN;
+ bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
+ int max_mtu = ETH_MAX_MTU;
+
+ if (!changelink) {
+ if (conf->flags & VXLAN_F_GPE)
+ vxlan_raw_setup(dev);
+ else
+ vxlan_ether_setup(dev);
+
+ if (conf->mtu)
+ dev->mtu = conf->mtu;
+ }
+
+ dst->remote_vni = conf->vni;
+
+ memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
+
+ if (lowerdev) {
+ dst->remote_ifindex = conf->remote_ifindex;
- dev->mtu = conf->mtu;
+ dev->gso_max_size = lowerdev->gso_max_size;
+ dev->gso_max_segs = lowerdev->gso_max_segs;
- if (conf->mtu > max_mtu)
- dev->mtu = max_mtu;
+ needed_headroom = lowerdev->hard_header_len;
+
+ max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
+ VXLAN_HEADROOM);
}
+ if (dev->mtu > max_mtu)
+ dev->mtu = max_mtu;
+
if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
needed_headroom += VXLAN6_HEADROOM;
else
@@ -2983,31 +3072,21 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
dev->needed_headroom = needed_headroom;
memcpy(&vxlan->cfg, conf, sizeof(*conf));
- if (!vxlan->cfg.dst_port) {
- if (conf->flags & VXLAN_F_GPE)
- vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
- else
- vxlan->cfg.dst_port = default_port;
- }
- vxlan->flags |= conf->flags;
+}
- if (!vxlan->cfg.age_interval)
- vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
+static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+ struct vxlan_config *conf,
+ bool changelink)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct net_device *lowerdev;
+ int ret;
- if (changelink)
- return 0;
+ ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan);
+ if (ret)
+ return ret;
- list_for_each_entry(tmp, &vn->vxlan_list, next) {
- if (tmp->cfg.vni == conf->vni &&
- (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
- tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
- tmp->cfg.dst_port == vxlan->cfg.dst_port &&
- (tmp->flags & VXLAN_F_RCV_FLAGS) ==
- (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
- pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
- return -EEXIST;
- }
- }
+ vxlan_config_apply(dev, conf, lowerdev, changelink);
return 0;
}
@@ -3071,22 +3150,35 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
}
if (data[IFLA_VXLAN_GROUP]) {
+ if (changelink && (conf->remote_ip.sa.sa_family != AF_INET))
+ return -EOPNOTSUPP;
+
conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
+ conf->remote_ip.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_GROUP6]) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
+ if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6))
+ return -EOPNOTSUPP;
+
conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
conf->remote_ip.sa.sa_family = AF_INET6;
}
if (data[IFLA_VXLAN_LOCAL]) {
+ if (changelink && (conf->saddr.sa.sa_family != AF_INET))
+ return -EOPNOTSUPP;
+
conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
conf->saddr.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_LOCAL6]) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
+ if (changelink && (conf->saddr.sa.sa_family != AF_INET6))
+ return -EOPNOTSUPP;
+
/* TODO: respect scope id */
conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
conf->saddr.sa.sa_family = AF_INET6;
@@ -3106,12 +3198,10 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
IPV6_FLOWLABEL_MASK;
if (data[IFLA_VXLAN_LEARNING]) {
- if (nla_get_u8(data[IFLA_VXLAN_LEARNING])) {
+ if (nla_get_u8(data[IFLA_VXLAN_LEARNING]))
conf->flags |= VXLAN_F_LEARN;
- } else {
+ else
conf->flags &= ~VXLAN_F_LEARN;
- vxlan->flags &= ~VXLAN_F_LEARN;
- }
} else if (!changelink) {
/* default to learn on a new device */
conf->flags |= VXLAN_F_LEARN;
@@ -3244,7 +3334,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
}
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct vxlan_config conf;
int err;
@@ -3257,7 +3348,8 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
}
static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[])
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
@@ -3394,43 +3486,44 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
- !!(vxlan->flags & VXLAN_F_LEARN)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
nla_put_u8(skb, IFLA_VXLAN_PROXY,
- !!(vxlan->flags & VXLAN_F_PROXY)) ||
- nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
+ nla_put_u8(skb, IFLA_VXLAN_RSC,
+ !!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
nla_put_u8(skb, IFLA_VXLAN_L2MISS,
- !!(vxlan->flags & VXLAN_F_L2MISS)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_L3MISS,
- !!(vxlan->flags & VXLAN_F_L3MISS)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
- !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
- !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
+ !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
- !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
- !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
- !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
- !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
+ !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
goto nla_put_failure;
- if (vxlan->flags & VXLAN_F_GBP &&
+ if (vxlan->cfg.flags & VXLAN_F_GBP &&
nla_put_flag(skb, IFLA_VXLAN_GBP))
goto nla_put_failure;
- if (vxlan->flags & VXLAN_F_GPE &&
+ if (vxlan->cfg.flags & VXLAN_F_GPE &&
nla_put_flag(skb, IFLA_VXLAN_GPE))
goto nla_put_failure;
- if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
+ if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL &&
nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
goto nla_put_failure;
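The hunks above split vxlan_dev_configure() into a side-effect-free vxlan_config_validate() followed by vxlan_config_apply(), so a rejected netlink request can no longer leave the device half-updated. A minimal userspace sketch of that two-phase pattern, with simplified stand-in structs and defaults (none of the names below are the kernel API):

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the kernel's vxlan_config / net_device. */
struct cfg { unsigned vni; unsigned dst_port; unsigned age; };
struct dev { struct cfg cfg; unsigned mtu; };

/* Phase 1: validate and fill defaults; no device state is touched,
 * so an error here leaves the device exactly as it was. */
static int cfg_validate(struct cfg *c)
{
	if (!c->vni)
		return -1;          /* VNI is mandatory in this sketch */
	if (!c->dst_port)
		c->dst_port = 4789; /* default VXLAN port */
	if (!c->age)
		c->age = 300;       /* FDB_AGE_DEFAULT-style fallback */
	return 0;
}

/* Phase 2: apply; by now the config is known-good, so this cannot fail. */
static void cfg_apply(struct dev *d, const struct cfg *c)
{
	memcpy(&d->cfg, c, sizeof(*c));
}

int main(void)
{
	struct dev d = { .mtu = 1500 };
	struct cfg c = { .vni = 42 };

	if (cfg_validate(&c))
		return 1;       /* reject; device untouched */
	cfg_apply(&d, &c);
	printf("vni=%u port=%u age=%u\n", d.cfg.vni, d.cfg.dst_port, d.cfg.age);
	return 0;
}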
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 0342073ed397..9c0839b2ca8f 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -83,6 +83,8 @@ enum bmi_cmd_id {
#define BMI_NVRAM_SEG_NAME_SZ 16
#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
+#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
+#define BMI_PARAM_FLASH_SECTION_ALL 0x10000
#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10
@@ -188,8 +190,8 @@ struct bmi_target_info {
u32 type;
};
-/* in msec */
-#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
+/* in jiffies */
+#define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ)
#define BMI_CE_NUM_TO_TARG 0
#define BMI_CE_NUM_TO_HOST 1
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index ee1090ca2eac..08b84c8c3614 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -59,205 +59,243 @@
* the buffer is sent/received.
*/
+static inline unsigned int
+ath10k_set_ring_byte(unsigned int offset,
+ struct ath10k_hw_ce_regs_addr_map *addr_map)
+{
+ return ((offset << addr_map->lsb) & addr_map->mask);
+}
+
+static inline unsigned int
+ath10k_get_ring_byte(unsigned int offset,
+ struct ath10k_hw_ce_regs_addr_map *addr_map)
+{
+ return ((offset & addr_map->mask) >> (addr_map->lsb));
+}
+
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
+ ath10k_pci_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dst_wr_index_addr, n);
}
static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
+ return ath10k_pci_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dst_wr_index_addr);
}
static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+ ath10k_pci_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_wr_index_addr, n);
}
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
+ return ath10k_pci_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_wr_index_addr);
}
static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
+ return ath10k_pci_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->current_srri_addr);
}
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int addr)
{
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
+ ath10k_pci_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_base_addr, addr);
}
static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
+ ath10k_pci_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_size_addr, n);
}
static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- u32 ctrl1_addr = ath10k_pci_read32((ar),
- (ce_ctrl_addr) + CE_CTRL1_ADDRESS);
+ struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ u32 ctrl1_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + ctrl_regs->addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
- (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
- CE_CTRL1_DMAX_LENGTH_SET(n));
+ ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+ (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
+ ath10k_set_ring_byte(n, ctrl_regs->dmax));
}
static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+ struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
- (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
- CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
+ ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+ (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
+ ath10k_set_ring_byte(n, ctrl_regs->src_ring));
}
static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+ struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
- (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
- CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
+ ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+ (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
+ ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}
static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
+ return ath10k_pci_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->current_drri_addr);
}
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
u32 ce_ctrl_addr,
u32 addr)
{
- ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
+ ath10k_pci_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr, addr);
}
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
+ ath10k_pci_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_size_addr, n);
}
static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+ struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
- (addr & ~SRC_WATERMARK_HIGH_MASK) |
- SRC_WATERMARK_HIGH_SET(n));
+ ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr,
+ (addr & ~(srcr_wm->wm_high->mask)) |
+ (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
}
static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+ struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
- (addr & ~SRC_WATERMARK_LOW_MASK) |
- SRC_WATERMARK_LOW_SET(n));
+ ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr,
+ (addr & ~(srcr_wm->wm_low->mask)) |
+ (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
}
static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+ struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
- (addr & ~DST_WATERMARK_HIGH_MASK) |
- DST_WATERMARK_HIGH_SET(n));
+ ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr,
+ (addr & ~(dstr_wm->wm_high->mask)) |
+ (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
}
static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+ struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
- (addr & ~DST_WATERMARK_LOW_MASK) |
- DST_WATERMARK_LOW_SET(n));
+ ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr,
+ (addr & ~(dstr_wm->wm_low->mask)) |
+ (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
}
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- u32 host_ie_addr = ath10k_pci_read32(ar,
- ce_ctrl_addr + HOST_IE_ADDRESS);
+ struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+ u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->host_ie_addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
- host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
+ ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+ host_ie_addr | host_ie->copy_complete->mask);
}
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- u32 host_ie_addr = ath10k_pci_read32(ar,
- ce_ctrl_addr + HOST_IE_ADDRESS);
+ struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+ u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->host_ie_addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
- host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
+ ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+ host_ie_addr & ~(host_ie->copy_complete->mask));
}
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- u32 host_ie_addr = ath10k_pci_read32(ar,
- ce_ctrl_addr + HOST_IE_ADDRESS);
+ struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->host_ie_addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
- host_ie_addr & ~CE_WATERMARK_MASK);
+ ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+ host_ie_addr & ~(wm_regs->wm_mask));
}
static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- u32 misc_ie_addr = ath10k_pci_read32(ar,
- ce_ctrl_addr + MISC_IE_ADDRESS);
+ struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
+ u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->misc_ie_addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
- misc_ie_addr | CE_ERROR_MASK);
+ ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
+ misc_ie_addr | misc_regs->err_mask);
}
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- u32 misc_ie_addr = ath10k_pci_read32(ar,
- ce_ctrl_addr + MISC_IE_ADDRESS);
+ struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
+ u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->misc_ie_addr);
- ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
- misc_ie_addr & ~CE_ERROR_MASK);
+ ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
+ misc_ie_addr & ~(misc_regs->err_mask));
}
static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int mask)
{
- ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
+ struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
/*
@@ -594,6 +632,7 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
unsigned int read_index;
+ struct ce_desc *desc;
if (src_ring->hw_index == sw_index) {
/*
@@ -623,6 +662,9 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
/* sanity */
src_ring->per_transfer_context[sw_index] = NULL;
+ desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
+ sw_index);
+ desc->nbytes = 0;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@@ -715,13 +757,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
spin_lock_bh(&ar_pci->ce_lock);
/* Clear the copy-complete interrupts that will be handled here. */
- ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
- HOST_IS_COPY_COMPLETE_MASK);
+ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->cc_mask);
spin_unlock_bh(&ar_pci->ce_lock);
@@ -737,7 +779,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
* Misc CE interrupts are not being handled, but still need
* to be cleared.
*/
- ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
+ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);
spin_unlock_bh(&ar_pci->ce_lock);
}
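The new ath10k_set_ring_byte()/ath10k_get_ring_byte() helpers above fold the old per-register *_SET/*_GET macros into one shift-and-mask over an {lsb, mask} descriptor taken from ar->hw_ce_regs. A self-contained sketch of that encoding, assuming an illustrative descriptor (not real CE register values):

#include <stdio.h>

struct addr_map { unsigned lsb; unsigned mask; };

static unsigned set_field(unsigned val, const struct addr_map *m)
{
	return (val << m->lsb) & m->mask;
}

static unsigned get_field(unsigned reg, const struct addr_map *m)
{
	return (reg & m->mask) >> m->lsb;
}

int main(void)
{
	/* 16-bit field at bits 31:16, like the source-ring low watermark */
	struct addr_map wm_low = { .lsb = 16, .mask = 0xffff0000 };
	unsigned reg = set_field(0x1234, &wm_low);

	printf("reg=0x%08x field=0x%x\n", reg, get_field(reg, &wm_low));
	return 0;
}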
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index e76a98242b98..95743a57525d 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -263,143 +263,11 @@ struct ce_attr {
void (*recv_cb)(struct ath10k_ce_pipe *);
};
-#define SR_BA_ADDRESS 0x0000
-#define SR_SIZE_ADDRESS 0x0004
-#define DR_BA_ADDRESS 0x0008
-#define DR_SIZE_ADDRESS 0x000c
-#define CE_CMD_ADDRESS 0x0018
-
-#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17
-#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
-#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
-#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
- (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
- CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
-
-#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16
-#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
-#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
-#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
- (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
- CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
-#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
- (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
- CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
-
-#define CE_CTRL1_DMAX_LENGTH_MSB 15
-#define CE_CTRL1_DMAX_LENGTH_LSB 0
-#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
-#define CE_CTRL1_DMAX_LENGTH_GET(x) \
- (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
-#define CE_CTRL1_DMAX_LENGTH_SET(x) \
- (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
-
-#define CE_CTRL1_ADDRESS 0x0010
-#define CE_CTRL1_HW_MASK 0x0007ffff
-#define CE_CTRL1_SW_MASK 0x0007ffff
-#define CE_CTRL1_HW_WRITE_MASK 0x00000000
-#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff
-#define CE_CTRL1_RSTMASK 0xffffffff
-#define CE_CTRL1_RESET 0x00000080
-
-#define CE_CMD_HALT_STATUS_MSB 3
-#define CE_CMD_HALT_STATUS_LSB 3
-#define CE_CMD_HALT_STATUS_MASK 0x00000008
-#define CE_CMD_HALT_STATUS_GET(x) \
- (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
-#define CE_CMD_HALT_STATUS_SET(x) \
- (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
-#define CE_CMD_HALT_STATUS_RESET 0
-#define CE_CMD_HALT_MSB 0
-#define CE_CMD_HALT_MASK 0x00000001
-
-#define HOST_IE_COPY_COMPLETE_MSB 0
-#define HOST_IE_COPY_COMPLETE_LSB 0
-#define HOST_IE_COPY_COMPLETE_MASK 0x00000001
-#define HOST_IE_COPY_COMPLETE_GET(x) \
- (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
-#define HOST_IE_COPY_COMPLETE_SET(x) \
- (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
-#define HOST_IE_COPY_COMPLETE_RESET 0
-#define HOST_IE_ADDRESS 0x002c
-
-#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
-#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
-#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
-#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
-#define HOST_IS_COPY_COMPLETE_MASK 0x00000001
-#define HOST_IS_ADDRESS 0x0030
-
-#define MISC_IE_ADDRESS 0x0034
-
-#define MISC_IS_AXI_ERR_MASK 0x00000400
-
-#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200
-#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100
-#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
-#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
-#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
-
-#define MISC_IS_ADDRESS 0x0038
-
-#define SR_WR_INDEX_ADDRESS 0x003c
-
-#define DST_WR_INDEX_ADDRESS 0x0040
-
-#define CURRENT_SRRI_ADDRESS 0x0044
-
-#define CURRENT_DRRI_ADDRESS 0x0048
-
-#define SRC_WATERMARK_LOW_MSB 31
-#define SRC_WATERMARK_LOW_LSB 16
-#define SRC_WATERMARK_LOW_MASK 0xffff0000
-#define SRC_WATERMARK_LOW_GET(x) \
- (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
-#define SRC_WATERMARK_LOW_SET(x) \
- (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
-#define SRC_WATERMARK_LOW_RESET 0
-#define SRC_WATERMARK_HIGH_MSB 15
-#define SRC_WATERMARK_HIGH_LSB 0
-#define SRC_WATERMARK_HIGH_MASK 0x0000ffff
-#define SRC_WATERMARK_HIGH_GET(x) \
- (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
-#define SRC_WATERMARK_HIGH_SET(x) \
- (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
-#define SRC_WATERMARK_HIGH_RESET 0
-#define SRC_WATERMARK_ADDRESS 0x004c
-
-#define DST_WATERMARK_LOW_LSB 16
-#define DST_WATERMARK_LOW_MASK 0xffff0000
-#define DST_WATERMARK_LOW_SET(x) \
- (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
-#define DST_WATERMARK_LOW_RESET 0
-#define DST_WATERMARK_HIGH_MSB 15
-#define DST_WATERMARK_HIGH_LSB 0
-#define DST_WATERMARK_HIGH_MASK 0x0000ffff
-#define DST_WATERMARK_HIGH_GET(x) \
- (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
-#define DST_WATERMARK_HIGH_SET(x) \
- (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
-#define DST_WATERMARK_HIGH_RESET 0
-#define DST_WATERMARK_ADDRESS 0x0050
-
static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}
-#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
- HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
- HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
- HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
-
-#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
- MISC_IS_DST_ADDR_ERR_MASK | \
- MISC_IS_SRC_LEN_ERR_MASK | \
- MISC_IS_DST_MAX_LEN_VIO_MASK | \
- MISC_IS_DST_RING_OVERFLOW_MASK | \
- MISC_IS_SRC_RING_OVERFLOW_MASK)
-
#define CE_SRC_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index eea111d704c5..75c5c903c8a6 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -72,6 +72,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -93,6 +95,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -113,6 +117,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -133,6 +139,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -153,6 +161,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -176,6 +186,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -202,6 +214,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -229,6 +243,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
+
+ /* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80MHz
+ * or 2x2 160MHz, long guard interval.
+ */
+ .vht160_mcs_rx_highest = 1560,
+ .vht160_mcs_tx_highest = 1560,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -255,6 +275,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
+
+ /* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80MHz or
+ * 1x1 160MHz, long guard interval.
+ */
+ .vht160_mcs_rx_highest = 780,
+ .vht160_mcs_tx_highest = 780,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -275,6 +301,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -297,6 +325,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -324,6 +354,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
},
};
@@ -691,7 +723,7 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
{
u32 result, address;
u8 board_id, chip_id;
- int ret;
+ int ret, bmi_board_id_param;
address = ar->hw_params.patch_load_addr;
@@ -715,8 +747,13 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
return ret;
}
- ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID,
- &result);
+ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+ bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID;
+ else
+ bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID;
+
+ ret = ath10k_bmi_execute(ar, address, bmi_board_id_param, &result);
if (ret) {
ath10k_err(ar, "could not execute otp for board id check: %d\n",
ret);
@@ -845,6 +882,11 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
return ret;
}
+ /* As of now, pre-cal is valid for 10_4 variants */
+ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+ bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
+
ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
if (ret) {
ath10k_err(ar, "could not execute otp (%d)\n", ret);
@@ -2449,24 +2491,29 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
case ATH10K_HW_QCA988X:
case ATH10K_HW_QCA9887:
ar->regs = &qca988x_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca988x_values;
break;
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
ar->regs = &qca6174_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca6174_values;
break;
case ATH10K_HW_QCA99X0:
case ATH10K_HW_QCA9984:
ar->regs = &qca99x0_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca99x0_values;
break;
case ATH10K_HW_QCA9888:
ar->regs = &qca99x0_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca9888_values;
break;
case ATH10K_HW_QCA4019:
ar->regs = &qca4019_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
ar->hw_values = &qca4019_values;
break;
default:
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 8fc08a5043db..1aa5cf12fce0 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -794,6 +794,7 @@ struct ath10k {
struct completion target_suspend;
const struct ath10k_hw_regs *regs;
+ const struct ath10k_hw_ce_regs *hw_ce_regs;
const struct ath10k_hw_values *hw_values;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 84b6067ff6e7..398dda978d6e 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -829,6 +829,19 @@ static void ath10k_htt_rx_h_signal(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ int i;
+
+ for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
+ status->chains &= ~BIT(i);
+
+ if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
+ status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
+ rxd->ppdu_start.rssi_chains[i].pri20_mhz;
+
+ status->chains |= BIT(i);
+ }
+ }
+
/* FIXME: Get real NF */
status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
rxd->ppdu_start.rssi_comb;
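In the hunk above, 0x80 acts as the "no measurement" sentinel for a chain's primary-20MHz RSSI, and valid readings are reported as noise floor plus RSSI with the matching bit set in status->chains. A standalone sketch of that per-chain fill, assuming the -95 dBm default noise floor (the sample RSSI values are made up):

#include <stdio.h>

#define NOISE_FLOOR (-95)  /* ATH10K_DEFAULT_NOISE_FLOOR is -95 dBm */
#define MAX_CHAINS 4
#define INVALID_RSSI 0x80  /* sentinel, as in the hunk above */

int main(void)
{
	unsigned char rssi[MAX_CHAINS] = { 30, 0x80, 25, 0x80 };
	unsigned chains = 0;
	int signal[MAX_CHAINS] = { 0 };

	for (int i = 0; i < MAX_CHAINS; i++) {
		if (rssi[i] == INVALID_RSSI)
			continue;             /* chain not populated */
		signal[i] = NOISE_FLOOR + rssi[i];
		chains |= 1u << i;
	}

	for (int i = 0; i < MAX_CHAINS; i++)
		if (chains & (1u << i))
			printf("chain %d: %d dBm\n", i, signal[i]);
	return 0;
}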
@@ -2229,9 +2242,15 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
sgi = ATH10K_HW_GI(peer_stats->flags);
- if (((txrate.flags == WMI_RATE_PREAMBLE_HT) ||
- (txrate.flags == WMI_RATE_PREAMBLE_VHT)) && txrate.mcs > 9) {
- ath10k_warn(ar, "Invalid mcs %hhd peer stats", txrate.mcs);
+ if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
+ ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
+ return;
+ }
+
+ if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
+ (txrate.mcs > 7 || txrate.nss < 1)) {
+ ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
+ txrate.mcs, txrate.nss);
return;
}
@@ -2254,7 +2273,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
arsta->txrate.legacy = rate;
} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
- arsta->txrate.mcs = txrate.mcs;
+ arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
} else {
arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
arsta->txrate.mcs = txrate.mcs;
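For the HT branch just above, the reported rate-info MCS is the full rate-table index, not the per-stream one; with equal-modulation HT rates each spatial-stream group spans eight indices, hence txrate.mcs + 8 * (txrate.nss - 1). A quick sanity check:

#include <stdio.h>

/* HT MCS index for equal-modulation rates: 8 MCS values per stream group. */
static int ht_mcs_index(int per_stream_mcs, int nss)
{
	return per_stream_mcs + 8 * (nss - 1);
}

int main(void)
{
	printf("%d\n", ht_mcs_index(7, 1));  /* 7  - top of 1-stream group */
	printf("%d\n", ht_mcs_index(0, 2));  /* 8  - first 2-stream rate  */
	printf("%d\n", ht_mcs_index(5, 3));  /* 21 */
	return 0;
}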
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index c866ab524571..afb0c01cbb55 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -15,6 +15,7 @@
*/
#include <linux/types.h>
+#include <linux/bitops.h>
#include "core.h"
#include "hw.h"
#include "hif.h"
@@ -191,6 +192,142 @@ const struct ath10k_hw_values qca4019_values = {
.ce_desc_meta_data_lsb = 4,
};
+static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
+ .msb = 0x00000010,
+ .lsb = 0x00000010,
+ .mask = GENMASK(16, 16),
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
+ .msb = 0x00000011,
+ .lsb = 0x00000011,
+ .mask = GENMASK(17, 17),
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
+ .addr = 0x00000010,
+ .hw_mask = 0x0007ffff,
+ .sw_mask = 0x0007ffff,
+ .hw_wr_mask = 0x00000000,
+ .sw_wr_mask = 0x0007ffff,
+ .reset_mask = 0xffffffff,
+ .reset = 0x00000080,
+ .src_ring = &qcax_src_ring,
+ .dst_ring = &qcax_dst_ring,
+ .dmax = &qcax_dmax,
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
+ .msb = 0x00000003,
+ .lsb = 0x00000003,
+ .mask = GENMASK(3, 3),
+};
+
+static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
+ .msb = 0x00000000,
+ .mask = GENMASK(0, 0),
+ .status_reset = 0x00000000,
+ .status = &qcax_cmd_halt_status,
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
+ .msb = 0x00000000,
+ .lsb = 0x00000000,
+ .mask = GENMASK(0, 0),
+};
+
+static struct ath10k_hw_ce_host_ie qcax_host_ie = {
+ .copy_complete_reset = 0x00000000,
+ .copy_complete = &qcax_host_ie_cc,
+};
+
+static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
+ .dstr_lmask = 0x00000010,
+ .dstr_hmask = 0x00000008,
+ .srcr_lmask = 0x00000004,
+ .srcr_hmask = 0x00000002,
+ .cc_mask = 0x00000001,
+ .wm_mask = 0x0000001e,
+ .addr = 0x00000030,
+};
+
+static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
+ .axi_err = 0x00000400,
+ .dstr_add_err = 0x00000200,
+ .srcr_len_err = 0x00000100,
+ .dstr_mlen_vio = 0x00000080,
+ .dstr_overflow = 0x00000040,
+ .srcr_overflow = 0x00000020,
+ .err_mask = 0x000007e0,
+ .addr = 0x00000038,
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
+ .msb = 0x0000001f,
+ .lsb = 0x00000010,
+ .mask = GENMASK(31, 16),
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
+ .addr = 0x0000004c,
+ .low_rst = 0x00000000,
+ .high_rst = 0x00000000,
+ .wm_low = &qcax_src_wm_low,
+ .wm_high = &qcax_src_wm_high,
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
+ .lsb = 0x00000010,
+ .mask = GENMASK(31, 16),
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
+ .addr = 0x00000050,
+ .low_rst = 0x00000000,
+ .high_rst = 0x00000000,
+ .wm_low = &qcax_dst_wm_low,
+ .wm_high = &qcax_dst_wm_high,
+};
+
+struct ath10k_hw_ce_regs qcax_ce_regs = {
+ .sr_base_addr = 0x00000000,
+ .sr_size_addr = 0x00000004,
+ .dr_base_addr = 0x00000008,
+ .dr_size_addr = 0x0000000c,
+ .ce_cmd_addr = 0x00000018,
+ .misc_ie_addr = 0x00000034,
+ .sr_wr_index_addr = 0x0000003c,
+ .dst_wr_index_addr = 0x00000040,
+ .current_srri_addr = 0x00000044,
+ .current_drri_addr = 0x00000048,
+ .host_ie_addr = 0x0000002c,
+ .ctrl1_regs = &qcax_ctrl1,
+ .cmd_halt = &qcax_cmd_halt,
+ .host_ie = &qcax_host_ie,
+ .wm_regs = &qcax_wm_reg,
+ .misc_regs = &qcax_misc_reg,
+ .wm_srcr = &qcax_wm_src_ring,
+ .wm_dstr = &qcax_wm_dst_ring,
+};
+
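The qcax_* tables above replace the literal masks that ce.h used to define with GENMASK(h, l), which sets bits h down to l inclusive. A userspace re-implementation for 32-bit masks, checked against three of the old literals (sketch only; the kernel's GENMASK comes from linux/bitops.h, which this patch includes):

#include <assert.h>
#include <stdio.h>

/* 32-bit GENMASK(h, l): bits h..l set, h >= l. */
#define GENMASK32(h, l) \
	((~0u >> (31 - (h))) & (~0u << (l)))

int main(void)
{
	assert(GENMASK32(15, 0)  == 0x0000ffffu); /* old CE_CTRL1_DMAX_LENGTH_MASK */
	assert(GENMASK32(31, 16) == 0xffff0000u); /* old SRC_WATERMARK_LOW_MASK */
	assert(GENMASK32(17, 17) == 0x00020000u); /* old dst-ring byte-swap bit */
	printf("ok\n");
	return 0;
}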
const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = {
{
.refclk = 48000000,
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index d34272803fd7..97dc1479f44e 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -268,6 +268,86 @@ extern const struct ath10k_hw_regs qca6174_regs;
extern const struct ath10k_hw_regs qca99x0_regs;
extern const struct ath10k_hw_regs qca4019_regs;
+struct ath10k_hw_ce_regs_addr_map {
+ u32 msb;
+ u32 lsb;
+ u32 mask;
+};
+
+struct ath10k_hw_ce_ctrl1 {
+ u32 addr;
+ u32 hw_mask;
+ u32 sw_mask;
+ u32 hw_wr_mask;
+ u32 sw_wr_mask;
+ u32 reset_mask;
+ u32 reset;
+ struct ath10k_hw_ce_regs_addr_map *src_ring;
+ struct ath10k_hw_ce_regs_addr_map *dst_ring;
+ struct ath10k_hw_ce_regs_addr_map *dmax;
+};
+
+struct ath10k_hw_ce_cmd_halt {
+ u32 status_reset;
+ u32 msb;
+ u32 mask;
+ struct ath10k_hw_ce_regs_addr_map *status;
+};
+
+struct ath10k_hw_ce_host_ie {
+ u32 copy_complete_reset;
+ struct ath10k_hw_ce_regs_addr_map *copy_complete;
+};
+
+struct ath10k_hw_ce_host_wm_regs {
+ u32 dstr_lmask;
+ u32 dstr_hmask;
+ u32 srcr_lmask;
+ u32 srcr_hmask;
+ u32 cc_mask;
+ u32 wm_mask;
+ u32 addr;
+};
+
+struct ath10k_hw_ce_misc_regs {
+ u32 axi_err;
+ u32 dstr_add_err;
+ u32 srcr_len_err;
+ u32 dstr_mlen_vio;
+ u32 dstr_overflow;
+ u32 srcr_overflow;
+ u32 err_mask;
+ u32 addr;
+};
+
+struct ath10k_hw_ce_dst_src_wm_regs {
+ u32 addr;
+ u32 low_rst;
+ u32 high_rst;
+ struct ath10k_hw_ce_regs_addr_map *wm_low;
+ struct ath10k_hw_ce_regs_addr_map *wm_high;
+};
+
+struct ath10k_hw_ce_regs {
+ u32 sr_base_addr;
+ u32 sr_size_addr;
+ u32 dr_base_addr;
+ u32 dr_size_addr;
+ u32 ce_cmd_addr;
+ u32 misc_ie_addr;
+ u32 sr_wr_index_addr;
+ u32 dst_wr_index_addr;
+ u32 current_srri_addr;
+ u32 current_drri_addr;
+ u32 ddr_addr_for_rri_low;
+ u32 ddr_addr_for_rri_high;
+ u32 ce_rri_low;
+ u32 ce_rri_high;
+ u32 host_ie_addr;
+ struct ath10k_hw_ce_host_wm_regs *wm_regs;
+ struct ath10k_hw_ce_misc_regs *misc_regs;
+ struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
+ struct ath10k_hw_ce_cmd_halt *cmd_halt;
+ struct ath10k_hw_ce_host_ie *host_ie;
+ struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
+ struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
+};
+
struct ath10k_hw_values {
u32 rtc_state_val_on;
u8 ce_count;
@@ -282,6 +362,7 @@ extern const struct ath10k_hw_values qca6174_values;
extern const struct ath10k_hw_values qca99x0_values;
extern const struct ath10k_hw_values qca9888_values;
extern const struct ath10k_hw_values qca4019_values;
+extern struct ath10k_hw_ce_regs qcax_ce_regs;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
@@ -454,6 +535,12 @@ struct ath10k_hw_params {
/* Number of bytes to be discarded for each FFT sample */
int spectral_bin_discard;
+
+ /* The board may have a restricted NSS for 160 or 80+80 vs what it
+ * can do for 80MHz.
+ */
+ int vht160_mcs_rx_highest;
+ int vht160_mcs_tx_highest;
};
struct htt_rx_desc;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 16cf250f6c39..55c808f03a84 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1392,7 +1392,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -2519,6 +2519,20 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+
+ if (arg->peer_vht_rates.rx_max_rate &&
+ (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
+ switch (arg->peer_vht_rates.rx_max_rate) {
+ case 1560:
+ /* 2x2 at 160MHz is all it can do. */
+ arg->peer_bw_rxnss_override = 2;
+ break;
+ case 780:
+ /* Can only do 1x1 at 160MHz (long guard interval) */
+ arg->peer_bw_rxnss_override = 1;
+ break;
+ }
+ }
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@@ -4361,6 +4375,7 @@ static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
+ struct ath10k_hw_params *hw = &ar->hw_params;
u16 mcs_map;
u32 val;
int i;
@@ -4390,7 +4405,7 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
* mode until that's resolved.
*/
if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
- !(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
+ (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0)
vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
mcs_map = 0;
@@ -4407,6 +4422,17 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+ /* If we are supporting 160MHz or 80+80, then the NIC may only be able
+ * to do a restricted NSS for 160 or 80+80 vs what it can do for 80MHz.
+ * Give user-space a clue if that is the case.
+ */
+ if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
+ (hw->vht160_mcs_rx_highest != 0 ||
+ hw->vht160_mcs_tx_highest != 0)) {
+ vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
+ vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
+ }
+
return vht_cap;
}
@@ -6072,6 +6098,20 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ar->num_stations + 1, ar->max_num_stations,
ar->num_peers + 1, ar->max_num_peers);
+ num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
+ num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
+
+ if (sta->tdls) {
+ if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
+ ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
+ arvif->vdev_id,
+ ar->max_num_tdls_vdevs);
+ ret = -ELNRNG;
+ goto exit;
+ }
+ peer_type = WMI_PEER_TYPE_TDLS;
+ }
+
ret = ath10k_mac_inc_num_stations(arvif, sta);
if (ret) {
ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
@@ -6079,9 +6119,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
goto exit;
}
- if (sta->tdls)
- peer_type = WMI_PEER_TYPE_TDLS;
-
ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
sta->addr, peer_type);
if (ret) {
@@ -6112,35 +6149,17 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
if (!sta->tdls)
goto exit;
- num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
- num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
-
- if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
- num_tdls_stations == 0) {
- ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
- arvif->vdev_id, ar->max_num_tdls_vdevs);
- ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_ENABLE_ACTIVE);
+ if (ret) {
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+ arvif->vdev_id, ret);
+ ath10k_peer_delete(ar, arvif->vdev_id,
+ sta->addr);
ath10k_mac_dec_num_stations(arvif, sta);
- ret = -ENOBUFS;
goto exit;
}
- if (num_tdls_stations == 0) {
- /* This is the first tdls peer in current vif */
- enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
-
- ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
- state);
- if (ret) {
- ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
- arvif->vdev_id, ret);
- ath10k_peer_delete(ar, arvif->vdev_id,
- sta->addr);
- ath10k_mac_dec_num_stations(arvif, sta);
- goto exit;
- }
- }
-
ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
WMI_TDLS_PEER_STATE_PEERING);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 1e9806f57ee4..7ebfc409018d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -101,7 +101,8 @@ static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
-static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+static int ath10k_pci_bmi_wait(struct ath10k *ar,
+ struct ath10k_ce_pipe *tx_pipe,
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
@@ -468,7 +469,7 @@ static int ath10k_pci_wake_wait(struct ath10k *ar)
while (tot_delay < PCIE_WAKE_TIMEOUT) {
if (ath10k_pci_is_awake(ar)) {
if (tot_delay > PCIE_WAKE_LATE_US)
- ath10k_warn(ar, "device wakeup took %d ms which is unusally long, otherwise it works normally.\n",
+ ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
tot_delay / 1000);
return 0;
}
@@ -1846,7 +1847,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
if (ret)
goto err_resp;
- ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
+ ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
if (ret) {
u32 unused_buffer;
unsigned int unused_nbytes;
@@ -1913,23 +1914,37 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
xfer->rx_done = true;
}
-static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+static int ath10k_pci_bmi_wait(struct ath10k *ar,
+ struct ath10k_ce_pipe *tx_pipe,
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer)
{
unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+ unsigned long started = jiffies;
+ unsigned long dur;
+ int ret;
while (time_before_eq(jiffies, timeout)) {
ath10k_pci_bmi_send_done(tx_pipe);
ath10k_pci_bmi_recv_data(rx_pipe);
- if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
- return 0;
+ if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
+ ret = 0;
+ goto out;
+ }
schedule();
}
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+
+out:
+ dur = jiffies - started;
+ if (dur > HZ)
+ ath10k_dbg(ar, ATH10K_DBG_BMI,
+ "bmi cmd took %lu jiffies hz %d ret %d\n",
+ dur, HZ, ret);
+ return ret;
}
/*
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 9e78fbae8413..859ed870bd97 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1553,7 +1553,7 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
/* read the data */
ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
if (ret) {
- ath10k_warn(ar, "failed to read from mbox window data addrress: %d\n",
+ ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
ret);
return ret;
}
@@ -1592,7 +1592,7 @@ static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
if (ret) {
ath10k_warn(ar,
- "failed to write 0x%p to mbox window data addrress: %d\n",
+ "failed to write 0x%p to mbox window data address: %d\n",
data, ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 87948aff1bd5..ef717b631ff8 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -63,7 +63,7 @@ ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
return 0;
}
-static struct thermal_cooling_device_ops ath10k_thermal_ops = {
+static const struct thermal_cooling_device_ops ath10k_thermal_ops = {
.get_max_state = ath10k_thermal_get_max_throttle_state,
.get_cur_state = ath10k_thermal_get_cur_throttle_state,
.set_cur_state = ath10k_thermal_set_cur_throttle_state,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index f9188027a6f6..7616c1c4bbd3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -2022,7 +2022,7 @@ ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
arp->dest_ip4_addr = arg->dest_ip4_addr;
ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d inverval %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
arg->vdev_id, arg->enabled, arg->method, arg->interval);
return skb;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index a66e2482897f..3efb404b83c0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -4481,31 +4481,17 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
u32 num_units, u32 unit_len)
{
dma_addr_t paddr;
- u32 pool_size = 0;
+ u32 pool_size;
int idx = ar->wmi.num_mem_chunks;
- void *vaddr = NULL;
-
- if (ar->wmi.num_mem_chunks == ARRAY_SIZE(ar->wmi.mem_chunks))
- return -ENOMEM;
+ void *vaddr;
- while (!vaddr && num_units) {
- pool_size = num_units * round_up(unit_len, 4);
- if (!pool_size)
- return -EINVAL;
+ pool_size = num_units * round_up(unit_len, 4);
+ vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
- vaddr = kzalloc(pool_size, GFP_KERNEL | __GFP_NOWARN);
- if (!vaddr)
- num_units /= 2;
- }
-
- if (!num_units)
+ if (!vaddr)
return -ENOMEM;
- paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(ar->dev, paddr)) {
- kfree(vaddr);
- return -ENOMEM;
- }
+ memset(vaddr, 0, pool_size);
ar->wmi.mem_chunks[idx].vaddr = vaddr;
ar->wmi.mem_chunks[idx].paddr = paddr;
@@ -5947,15 +5933,6 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
{
- if (arg->ie_len && !arg->ie)
- return -EINVAL;
- if (arg->n_channels && !arg->channels)
- return -EINVAL;
- if (arg->n_ssids && !arg->ssids)
- return -EINVAL;
- if (arg->n_bssids && !arg->bssids)
- return -EINVAL;
-
if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
return -EINVAL;
if (arg->n_channels > ARRAY_SIZE(arg->channels))
@@ -6756,7 +6733,12 @@ ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
- cmd->peer_bw_rxnss_override = 0;
+ if (arg->peer_bw_rxnss_override)
+ cmd->peer_bw_rxnss_override =
+ __cpu_to_le32((arg->peer_bw_rxnss_override - 1) |
+ BIT(PEER_BW_RXNSS_OVERRIDE_OFFSET));
+ else
+ cmd->peer_bw_rxnss_override = 0;
}
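Per the hunk above, the 10.4 peer-assoc field carries NSS-1 in its low bits with bit 31 (PEER_BW_RXNSS_OVERRIDE_OFFSET) flagging that an override is present, and zero meaning "no override". A tiny encoder sketch (host byte order only; the kernel additionally converts with __cpu_to_le32):

#include <stdio.h>
#include <stdint.h>

#define PEER_BW_RXNSS_OVERRIDE_OFFSET 31

/* Encode as the hunk above does: NSS-1 in the low bits, bit 31 = valid. */
static uint32_t encode_rxnss_override(unsigned nss)
{
	if (!nss)
		return 0;                       /* no override requested */
	return (nss - 1) | (1u << PEER_BW_RXNSS_OVERRIDE_OFFSET);
}

int main(void)
{
	printf("0x%08x\n", encode_rxnss_override(0)); /* 0x00000000 */
	printf("0x%08x\n", encode_rxnss_override(1)); /* 0x80000000 */
	printf("0x%08x\n", encode_rxnss_override(2)); /* 0x80000001 */
	return 0;
}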
static int
@@ -8289,11 +8271,10 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
/* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
- dma_unmap_single(ar->dev,
- ar->wmi.mem_chunks[i].paddr,
- ar->wmi.mem_chunks[i].len,
- DMA_BIDIRECTIONAL);
- kfree(ar->wmi.mem_chunks[i].vaddr);
+ dma_free_coherent(ar->dev,
+ ar->wmi.mem_chunks[i].len,
+ ar->wmi.mem_chunks[i].vaddr,
+ ar->wmi.mem_chunks[i].paddr);
}
ar->wmi.num_mem_chunks = 0;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 1b4865a55595..baa38c8f847c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -6028,6 +6028,8 @@ struct wmi_10_2_peer_assoc_complete_cmd {
__le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
} __packed;
+#define PEER_BW_RXNSS_OVERRIDE_OFFSET 31
+
struct wmi_10_4_peer_assoc_complete_cmd {
struct wmi_10_2_peer_assoc_complete_cmd cmd;
__le32 peer_bw_rxnss_override;
@@ -6051,6 +6053,7 @@ struct wmi_peer_assoc_complete_arg {
u32 peer_vht_caps;
enum wmi_phy_mode peer_phymode;
struct wmi_vht_rate_set_arg peer_vht_rates;
+ u32 peer_bw_rxnss_override;
};
struct wmi_peer_add_wds_entry_cmd {
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 5c0ba83a44aa..546243e11737 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -382,7 +382,7 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
list_for_each_entry_safe(packet, tmp_pkt,
txq, list) {
ath6kl_dbg(ATH6KL_DBG_HTC,
- "%s: Indicat overflowed TX pkts: %p\n",
+ "%s: Indicate overflowed TX pkts: %p\n",
__func__, packet);
action = ep->ep_cb.tx_full(ep->target, packet);
if (action == HTC_SEND_FULL_DROP) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index ae3043559b6d..fe5102ca5010 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1821,8 +1821,6 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
{
REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
- REG_SET_BIT(ah, 0x9864, 0x7f000);
- REG_SET_BIT(ah, 0x9924, 0x7f00fe);
REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
REG_WRITE(ah, AR_CR, AR_CR_RXD);
REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9e65d14e7b1e..8b4ac7f0a09b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -731,12 +731,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
+ ath9k_rng_start(sc);
+
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
- ath9k_rng_start(sc);
-
return 0;
}
@@ -826,10 +826,10 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_deinit_channel_context(sc);
- ath9k_rng_stop(sc);
-
mutex_lock(&sc->mutex);
+ ath9k_rng_stop(sc);
+
ath_cancel_work(sc);
if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 66596b95273f..cf23fd815211 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -548,7 +548,7 @@ void ath_mci_intr(struct ath_softc *sc)
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
- offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
+ ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index 568b1c6c2b2b..f9d3d6eedd3c 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -24,6 +24,8 @@
#define ATH9K_RNG_BUF_SIZE 320
#define ATH9K_RNG_ENTROPY(x) (((x) * 8 * 10) >> 5) /* quality: 10/32 */
+static DECLARE_WAIT_QUEUE_HEAD(rng_queue);
+
static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
{
int i, j;
@@ -85,7 +87,9 @@ static int ath9k_rng_kthread(void *data)
ATH9K_RNG_BUF_SIZE);
if (unlikely(!bytes_read)) {
delay = ath9k_rng_delay_get(++fail_stats);
- msleep_interruptible(delay);
+ wait_event_interruptible_timeout(rng_queue,
+ kthread_should_stop(),
+ msecs_to_jiffies(delay));
continue;
}
@@ -120,6 +124,8 @@ void ath9k_rng_start(struct ath_softc *sc)
void ath9k_rng_stop(struct ath_softc *sc)
{
- if (sc->rng_task)
+ if (sc->rng_task) {
kthread_stop(sc->rng_task);
+ sc->rng_task = NULL;
+ }
}
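The rng.c change above swaps msleep_interruptible() for a wait on a queue whose condition is kthread_should_stop(), so ath9k_rng_stop() wakes the thread immediately instead of waiting out the backoff delay. A userspace analog of that "sleep unless stopped" shape using a condition variable (names and timing are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool should_stop;

/* Sleep up to delay_ms, but return early if a stop was requested. */
static bool sleep_or_stop(int delay_ms)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += delay_ms / 1000;
	ts.tv_nsec += (long)(delay_ms % 1000) * 1000000;
	if (ts.tv_nsec >= 1000000000) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000;
	}

	pthread_mutex_lock(&lock);
	while (!should_stop &&
	       pthread_cond_timedwait(&cond, &lock, &ts) == 0)
		;	/* woken: re-check the stop condition */
	bool stop = should_stop;
	pthread_mutex_unlock(&lock);
	return stop;
}

static void *worker(void *arg)
{
	while (!sleep_or_stop(200))
		printf("worker: no data, backing off\n");
	printf("worker: stop requested, exiting promptly\n");
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec half = { .tv_sec = 0, .tv_nsec = 500000000 };

	pthread_create(&t, NULL, worker, NULL);
	nanosleep(&half, NULL);

	pthread_mutex_lock(&lock);
	should_stop = true;
	pthread_cond_broadcast(&cond); /* the kthread_stop() analog */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}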
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index a866cbda0799..49ed1afb913c 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -189,22 +189,27 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
if (strtobool(buf, &start))
return -EINVAL;
+ mutex_lock(&sc->mutex);
+
if (start == sc->tx99_state) {
if (!start)
- return count;
+ goto out;
ath_dbg(common, XMIT, "Resetting TX99\n");
ath9k_tx99_deinit(sc);
}
if (!start) {
ath9k_tx99_deinit(sc);
- return count;
+ goto out;
}
r = ath9k_tx99_init(sc);
- if (r)
+ if (r) {
+ mutex_unlock(&sc->mutex);
return r;
-
+ }
+out:
+ mutex_unlock(&sc->mutex);
return count;
}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 85d09fdef8dc..64a354fa78ab 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -298,7 +298,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
u16 headroom = sizeof(struct htc_frame_hdr) +
sizeof(struct wmi_cmd_hdr);
struct sk_buff *skb;
- u8 *data;
unsigned long time_left;
int ret = 0;
@@ -312,7 +311,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
skb_reserve(skb, headroom);
if (cmd_len != 0 && cmd_buf != NULL) {
- data = skb_put_data(skb, cmd_buf, cmd_len);
+ skb_put_data(skb, cmd_buf, cmd_len);
}
mutex_lock(&wmi->op_mutex);
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 89bf2f9eca1d..4ae21da78e9e 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -10,7 +10,6 @@ wil6210-y += interrupt.o
wil6210-y += txrx.o
wil6210-y += debug.o
wil6210-y += rx_reorder.o
-wil6210-y += ioctl.o
wil6210-y += fw.o
wil6210-y += pm.o
wil6210-y += pmc.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index fdaa99c541ac..0b5383a62d42 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -16,6 +16,7 @@
#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
+#include <net/netlink.h>
#include "wil6210.h"
#include "wmi.h"
@@ -41,6 +42,126 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
/* channel 4 not supported yet */
};
+/* Vendor ID to be used in vendor-specific commands and events
+ * sent to user space.
+ * NOTE: The authoritative place for the definitions of QCA_NL80211_VENDOR_ID,
+ * the vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and
+ * qca_wlan_vendor_attr is the open source file src/common/qca-vendor.h in
+ * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that.
+ */
+
+#define QCA_NL80211_VENDOR_ID 0x001374
+
+#define WIL_MAX_RF_SECTORS (128)
+#define WIL_CID_ALL (0xff)
+
+enum qca_wlan_vendor_attr_rf_sector {
+ QCA_ATTR_MAC_ADDR = 6,
+ QCA_ATTR_PAD = 13,
+ QCA_ATTR_TSF = 29,
+ QCA_ATTR_DMG_RF_SECTOR_INDEX = 30,
+ QCA_ATTR_DMG_RF_SECTOR_TYPE = 31,
+ QCA_ATTR_DMG_RF_MODULE_MASK = 32,
+ QCA_ATTR_DMG_RF_SECTOR_CFG = 33,
+ QCA_ATTR_DMG_RF_SECTOR_MAX,
+};
+
+enum qca_wlan_vendor_attr_dmg_rf_sector_type {
+ QCA_ATTR_DMG_RF_SECTOR_TYPE_RX,
+ QCA_ATTR_DMG_RF_SECTOR_TYPE_TX,
+ QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX
+};
+
+enum qca_wlan_vendor_attr_dmg_rf_sector_cfg {
+ QCA_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
+
+ /* keep last */
+ QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_MAX =
+ QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1
+};
+
+static const struct
+nla_policy wil_rf_sector_policy[QCA_ATTR_DMG_RF_SECTOR_MAX + 1] = {
+ [QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN },
+ [QCA_ATTR_DMG_RF_SECTOR_INDEX] = { .type = NLA_U16 },
+ [QCA_ATTR_DMG_RF_SECTOR_TYPE] = { .type = NLA_U8 },
+ [QCA_ATTR_DMG_RF_MODULE_MASK] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG] = { .type = NLA_NESTED },
+};
+
+static const struct
+nla_policy wil_rf_sector_cfg_policy[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1] = {
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] = { .type = NLA_U8 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16] = { .type = NLA_U32 },
+};
+
+enum qca_nl80211_vendor_subcmds {
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139,
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140,
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141,
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142,
+};
+
+static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+static int wil_rf_sector_get_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+static int wil_rf_sector_set_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+
+/* vendor specific commands */
+static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_rf_sector_get_cfg
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_rf_sector_set_cfg
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd =
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_rf_sector_get_selected
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd =
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_rf_sector_set_selected
+ },
+};
+
static struct ieee80211_supported_band wil_band_60ghz = {
.channels = wil_60ghz_channels,
.n_channels = ARRAY_SIZE(wil_60ghz_channels),
@@ -1325,6 +1446,8 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
wil_set_recovery_state(wil, fw_recovery_idle);
+ set_bit(wil_status_resetting, wil->status);
+
mutex_lock(&wil->mutex);
wmi_pcp_stop(wil);
@@ -1571,6 +1694,42 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return wil_ps_update(wil, ps_profile);
}
+static int wil_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ /* Setting the wakeup trigger based on wow is TBD */
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 0;
+ }
+
+ rc = wil_can_suspend(wil, false);
+ if (rc)
+ goto out;
+
+ wil_dbg_pm(wil, "suspending\n");
+
+ wil_p2p_stop_discovery(wil);
+
+ wil_abort_scan(wil, true);
+
+out:
+ return rc;
+}
+
+static int wil_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_pm(wil, "resuming\n");
+
+ return 0;
+}
+
static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1602,6 +1761,8 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
+ .suspend = wil_cfg80211_suspend,
+ .resume = wil_cfg80211_resume,
};
static void wil_wiphy_init(struct wiphy *wiphy)
@@ -1637,6 +1798,9 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+
+ wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
+ wiphy->vendor_commands = wil_nl80211_vendor_commands;
}
struct wireless_dev *wil_cfg80211_init(struct device *dev)
@@ -1695,3 +1859,452 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
kfree(p2p_wdev);
}
}
+
+static int wil_rf_sector_status_to_rc(u8 status)
+{
+ switch (status) {
+ case WMI_RF_SECTOR_STATUS_SUCCESS:
+ return 0;
+ case WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR:
+ return -EINVAL;
+ case WMI_RF_SECTOR_STATUS_BUSY_ERROR:
+ return -EAGAIN;
+ case WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ int rc;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ u16 sector_index;
+ u8 sector_type;
+ u32 rf_modules_vec;
+ struct wmi_get_rf_sector_params_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_get_rf_sector_params_done_event evt;
+ } __packed reply;
+ struct sk_buff *msg;
+ struct nlattr *nl_cfgs, *nl_cfg;
+ u32 i;
+ struct wmi_rf_sector_info *si;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+ wil_rf_sector_policy, NULL);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
+ !tb[QCA_ATTR_DMG_RF_MODULE_MASK]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+
+ sector_index = nla_get_u16(
+ tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+ if (sector_index >= WIL_MAX_RF_SECTORS) {
+ wil_err(wil, "Invalid sector index %d\n", sector_index);
+ return -EINVAL;
+ }
+
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ rf_modules_vec = nla_get_u32(
+ tb[QCA_ATTR_DMG_RF_MODULE_MASK]);
+ if (rf_modules_vec >= BIT(WMI_MAX_RF_MODULES_NUM)) {
+ wil_err(wil, "Invalid rf module mask 0x%x\n", rf_modules_vec);
+ return -EINVAL;
+ }
+
+ cmd.sector_idx = cpu_to_le16(sector_index);
+ cmd.sector_type = sector_type;
+ cmd.rf_modules_vec = rf_modules_vec & 0xFF;
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
+ WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "get rf sector cfg failed with status %d\n",
+ reply.evt.status);
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+ }
+
+ msg = cfg80211_vendor_cmd_alloc_reply_skb(
+ wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
+ if (!msg)
+ return -ENOMEM;
+
+ if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
+ le64_to_cpu(reply.evt.tsf),
+ QCA_ATTR_PAD))
+ goto nla_put_failure;
+
+ nl_cfgs = nla_nest_start(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
+ if (!nl_cfgs)
+ goto nla_put_failure;
+ for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) {
+ if (!(rf_modules_vec & BIT(i)))
+ continue;
+ nl_cfg = nla_nest_start(msg, i);
+ if (!nl_cfg)
+ goto nla_put_failure;
+ si = &reply.evt.sectors_info[i];
+ if (nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
+ i) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
+ le32_to_cpu(si->etype0)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
+ le32_to_cpu(si->etype1)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
+ le32_to_cpu(si->etype2)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
+ le32_to_cpu(si->psh_hi)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
+ le32_to_cpu(si->psh_lo)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
+ le32_to_cpu(si->dtype_swch_off)))
+ goto nla_put_failure;
+ nla_nest_end(msg, nl_cfg);
+ }
+
+ nla_nest_end(msg, nl_cfgs);
+ rc = cfg80211_vendor_cmd_reply(msg);
+ return rc;
+nla_put_failure:
+ kfree_skb(msg);
+ return -ENOBUFS;
+}
+
+static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ int rc, tmp;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1];
+ u16 sector_index, rf_module_index;
+ u8 sector_type;
+ u32 rf_modules_vec = 0;
+ struct wmi_set_rf_sector_params_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_set_rf_sector_params_done_event evt;
+ } __packed reply;
+ struct nlattr *nl_cfg;
+ struct wmi_rf_sector_info *si;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+ wil_rf_sector_policy, NULL);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_CFG]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+
+ sector_index = nla_get_u16(
+ tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+ if (sector_index >= WIL_MAX_RF_SECTORS) {
+ wil_err(wil, "Invalid sector index %d\n", sector_index);
+ return -EINVAL;
+ }
+
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.sector_idx = cpu_to_le16(sector_index);
+ cmd.sector_type = sector_type;
+ nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
+ tmp) {
+ rc = nla_parse_nested(tb2, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
+ nl_cfg, wil_rf_sector_cfg_policy,
+ NULL);
+ if (rc) {
+ wil_err(wil, "invalid sector cfg\n");
+ return -EINVAL;
+ }
+
+ if (!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]) {
+ wil_err(wil, "missing cfg params\n");
+ return -EINVAL;
+ }
+
+ rf_module_index = nla_get_u8(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX]);
+ if (rf_module_index >= WMI_MAX_RF_MODULES_NUM) {
+ wil_err(wil, "invalid RF module index %d\n",
+ rf_module_index);
+ return -EINVAL;
+ }
+ rf_modules_vec |= BIT(rf_module_index);
+ si = &cmd.sectors_info[rf_module_index];
+ si->etype0 = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0]));
+ si->etype1 = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1]));
+ si->etype2 = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2]));
+ si->psh_hi = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI]));
+ si->psh_lo = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO]));
+ si->dtype_swch_off = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]));
+ }
+
+ cmd.rf_modules_vec = rf_modules_vec & 0xFF;
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
+ WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+}
+
+static int wil_rf_sector_get_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ int rc;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ u8 sector_type, mac_addr[ETH_ALEN];
+ int cid = 0;
+ struct wmi_get_selected_rf_sector_index_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_get_selected_rf_sector_index_done_event evt;
+ } __packed reply;
+ struct sk_buff *msg;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+ wil_rf_sector_policy, NULL);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ if (tb[QCA_ATTR_MAC_ADDR]) {
+ ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
+ cid = wil_find_cid(wil, mac_addr);
+ if (cid < 0) {
+ wil_err(wil, "invalid MAC address %pM\n", mac_addr);
+ return -ENOENT;
+ }
+ } else {
+ if (test_bit(wil_status_fwconnected, wil->status)) {
+ wil_err(wil, "must specify MAC address when connected\n");
+ return -EINVAL;
+ }
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cid = (u8)cid;
+ cmd.sector_type = sector_type;
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID,
+ &cmd, sizeof(cmd),
+ WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "get rf selected sector cfg failed with status %d\n",
+ reply.evt.status);
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+ }
+
+ msg = cfg80211_vendor_cmd_alloc_reply_skb(
+ wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
+ if (!msg)
+ return -ENOMEM;
+
+ if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
+ le64_to_cpu(reply.evt.tsf),
+ QCA_ATTR_PAD) ||
+ nla_put_u16(msg, QCA_ATTR_DMG_RF_SECTOR_INDEX,
+ le16_to_cpu(reply.evt.sector_idx)))
+ goto nla_put_failure;
+
+ rc = cfg80211_vendor_cmd_reply(msg);
+ return rc;
+nla_put_failure:
+ kfree_skb(msg);
+ return -ENOBUFS;
+}
+
+static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil,
+ u16 sector_index,
+ u8 sector_type, u8 cid)
+{
+ struct wmi_set_selected_rf_sector_index_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_set_selected_rf_sector_index_done_event evt;
+ } __packed reply;
+ int rc;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.sector_idx = cpu_to_le16(sector_index);
+ cmd.sector_type = sector_type;
+ cmd.cid = (u8)cid;
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID,
+ &cmd, sizeof(cmd),
+ WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+}
+
+static int wil_rf_sector_set_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ int rc;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ u16 sector_index;
+ u8 sector_type, mac_addr[ETH_ALEN], i;
+ int cid = 0;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+ wil_rf_sector_policy, NULL);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+
+ sector_index = nla_get_u16(
+ tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+ if (sector_index >= WIL_MAX_RF_SECTORS &&
+ sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
+ wil_err(wil, "Invalid sector index %d\n", sector_index);
+ return -EINVAL;
+ }
+
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ if (tb[QCA_ATTR_MAC_ADDR]) {
+ ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
+ if (!is_broadcast_ether_addr(mac_addr)) {
+ cid = wil_find_cid(wil, mac_addr);
+ if (cid < 0) {
+ wil_err(wil, "invalid MAC address %pM\n",
+ mac_addr);
+ return -ENOENT;
+ }
+ } else {
+ if (sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
+ wil_err(wil, "broadcast MAC valid only with unlocking\n");
+ return -EINVAL;
+ }
+ cid = -1;
+ }
+ } else {
+ if (test_bit(wil_status_fwconnected, wil->status)) {
+ wil_err(wil, "must specify MAC address when connected\n");
+ return -EINVAL;
+ }
+ /* otherwise, using cid=0 for unassociated station */
+ }
+
+ if (cid >= 0) {
+ rc = wil_rf_sector_wmi_set_selected(wil, sector_index,
+ sector_type, cid);
+ } else {
+ /* unlock all cids */
+ rc = wil_rf_sector_wmi_set_selected(
+ wil, WMI_INVALID_RF_SECTOR_INDEX, sector_type,
+ WIL_CID_ALL);
+ if (rc == -EINVAL) {
+ for (i = 0; i < WIL6210_MAX_CID; i++) {
+ rc = wil_rf_sector_wmi_set_selected(
+ wil, WMI_INVALID_RF_SECTOR_INDEX,
+ sector_type, i);
+ /* the FW will silently ignore and return
+ * success for unused cid, so abort the loop
+ * on any other error
+ */
+ if (rc) {
+ wil_err(wil, "unlock cid %d failed with status %d\n",
+ i, rc);
+ break;
+ }
+ }
+ }
+ }
+
+ return rc;
+}
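
The bulk of this file's additions is boilerplate around two netlink facilities: a wiphy_vendor_command table hooked up in wil_wiphy_init(), and nla_parse()/nla_parse_nested() with per-attribute policies in each handler. A stripped-down sketch of one vendor command round trip, assuming the 4.12-era six-argument nla_parse() used above; DEMO_ATTR_* and demo_* are hypothetical names, not part of the driver:

#include <net/cfg80211.h>
#include <net/netlink.h>

enum { DEMO_ATTR_VALUE = 1, DEMO_ATTR_MAX = 1 };

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_VALUE] = { .type = NLA_U32 },
};

static int demo_vendor_doit(struct wiphy *wiphy, struct wireless_dev *wdev,
			    const void *data, int data_len)
{
	struct nlattr *tb[DEMO_ATTR_MAX + 1];
	struct sk_buff *msg;
	int rc;

	/* validate incoming attributes against the policy */
	rc = nla_parse(tb, DEMO_ATTR_MAX, data, data_len, demo_policy, NULL);
	if (rc)
		return rc;
	if (!tb[DEMO_ATTR_VALUE])
		return -EINVAL;

	/* build and send the reply skb */
	msg = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 16);
	if (!msg)
		return -ENOMEM;
	if (nla_put_u32(msg, DEMO_ATTR_VALUE,
			nla_get_u32(tb[DEMO_ATTR_VALUE]))) {
		kfree_skb(msg);
		return -ENOBUFS;
	}
	return cfg80211_vendor_cmd_reply(msg);
}
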
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 5b0f9fc66bb6..f82506d276d3 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -509,6 +509,10 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
void *buf;
size_t ret;
+ if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
+ test_bit(wil_status_suspended, wil_blob->wil->status))
+ return 0;
+
if (pos < 0)
return -EINVAL;
@@ -1600,6 +1604,49 @@ static const struct file_operations fops_fw_version = {
.llseek = seq_lseek,
};
+/*---------suspend_stats---------*/
+static ssize_t wil_write_suspend_stats(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+
+ memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
+
+ return len;
+}
+
+static ssize_t wil_read_suspend_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ static char text[400];
+ int n;
+
+ n = snprintf(text, sizeof(text),
+ "Suspend statistics:\n"
+ "successful suspends:%ld failed suspends:%ld\n"
+ "successful resumes:%ld failed resumes:%ld\n"
+ "rejected by host:%ld rejected by device:%ld\n",
+ wil->suspend_stats.successful_suspends,
+ wil->suspend_stats.failed_suspends,
+ wil->suspend_stats.successful_resumes,
+ wil->suspend_stats.failed_resumes,
+ wil->suspend_stats.rejected_by_host,
+ wil->suspend_stats.rejected_by_device);
+
+ n = min_t(int, n, sizeof(text));
+
+ return simple_read_from_buffer(user_buf, count, ppos, text, n);
+}
+
+static const struct file_operations fops_suspend_stats = {
+ .read = wil_read_suspend_stats,
+ .write = wil_write_suspend_stats,
+ .open = simple_open,
+};
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1652,6 +1699,7 @@ static const struct {
{"led_blink_time", 0644, &fops_led_blink_time},
{"fw_capabilities", 0444, &fops_fw_capabilities},
{"fw_version", 0444, &fops_fw_version},
+ {"suspend_stats", 0644, &fops_suspend_stats},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1698,6 +1746,7 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(discovery_mode, 0644, doff_u8),
WIL_FIELD(chip_revision, 0444, doff_u8),
WIL_FIELD(abft_len, 0644, doff_u8),
+ WIL_FIELD(wakeup_trigger, 0644, doff_u8),
{},
};
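
The new suspend_stats file follows the usual debugfs idiom: read formats the counters into a buffer and hands it to simple_read_from_buffer(), write resets them. The same shape as a sketch (demo_* names are hypothetical); using scnprintf() instead of snprintf() makes the separate min_t() clamp unnecessary, since it returns the number of characters actually written:

#include <linux/debugfs.h>
#include <linux/fs.h>

struct demo_priv {
	unsigned long suspends;	/* hypothetical counter */
};

static ssize_t demo_stats_read(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct demo_priv *dp = file->private_data;
	char text[64];
	int n;

	n = scnprintf(text, sizeof(text), "suspends:%lu\n", dp->suspends);
	return simple_read_from_buffer(user_buf, count, ppos, text, n);
}

static ssize_t demo_stats_write(struct file *file, const char __user *buf,
				size_t len, loff_t *ppos)
{
	struct demo_priv *dp = file->private_data;

	dp->suspends = 0;	/* any write clears the counters */
	return len;
}

static const struct file_operations demo_stats_fops = {
	.read = demo_stats_read,
	.write = demo_stats_write,
	.open = simple_open,
	.llseek = default_llseek,
};
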
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index cab1e5c0e374..cad8a95c4e4e 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -467,6 +467,12 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
wil6210_unmask_irq_pseudo(wil);
+ if (wil->suspend_resp_rcvd) {
+ wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
+ wil->suspend_resp_comp = true;
+ wake_up_interruptible(&wil->wq);
+ }
+
return IRQ_HANDLED;
}
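
This interrupt.c hunk is the producer half of the suspend handshake: once the WMI path has seen WMI_TRAFFIC_SUSPEND_EVENTID and set suspend_resp_rcvd, the next threaded interrupt marks the handshake complete and wakes the suspend flow sleeping on wil->wq. Reduced to a sketch (demo_* fields are hypothetical; the waiting side is sketched after the wmi.c diff below):

#include <linux/interrupt.h>
#include <linux/wait.h>

struct demo_priv {
	bool resp_rcvd;		/* set by the event handler */
	bool resp_comp;		/* set here, read by the waiter */
	wait_queue_head_t wq;
};

static irqreturn_t demo_thread_irq(int irq, void *cookie)
{
	struct demo_priv *dp = cookie;

	/* ... normal interrupt servicing ... */

	if (dp->resp_rcvd) {
		dp->resp_comp = true;
		wake_up_interruptible(&dp->wq);
	}
	return IRQ_HANDLED;
}
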
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
deleted file mode 100644
index 630380078236..000000000000
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/uaccess.h>
-
-#include "wil6210.h"
-#include <uapi/linux/wil6210_uapi.h>
-
-#define wil_hex_dump_ioctl(prefix_str, buf, len) \
- print_hex_dump_debug("DBG[IOC ]" prefix_str, \
- DUMP_PREFIX_OFFSET, 16, 1, buf, len, true)
-#define wil_dbg_ioctl(wil, fmt, arg...) wil_dbg(wil, "DBG[IOC ]" fmt, ##arg)
-
-static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, uint32_t addr,
- uint32_t size, enum wil_memio_op op)
-{
- void __iomem *a;
- u32 off;
-
- switch (op & wil_mmio_addr_mask) {
- case wil_mmio_addr_linker:
- a = wmi_buffer(wil, cpu_to_le32(addr));
- break;
- case wil_mmio_addr_ahb:
- a = wmi_addr(wil, addr);
- break;
- case wil_mmio_addr_bar:
- a = wmi_addr(wil, addr + WIL6210_FW_HOST_OFF);
- break;
- default:
- wil_err(wil, "Unsupported address mode, op = 0x%08x\n", op);
- return NULL;
- }
-
- off = a - wil->csr;
- if (size >= WIL6210_MEM_SIZE - off) {
- wil_err(wil, "Requested block does not fit into memory: "
- "off = 0x%08x size = 0x%08x\n", off, size);
- return NULL;
- }
-
- return a;
-}
-
-static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
-{
- struct wil_memio io;
- void __iomem *a;
- bool need_copy = false;
-
- if (copy_from_user(&io, data, sizeof(io)))
- return -EFAULT;
-
- wil_dbg_ioctl(wil, "IO: addr = 0x%08x val = 0x%08x op = 0x%08x\n",
- io.addr, io.val, io.op);
-
- a = wil_ioc_addr(wil, io.addr, sizeof(u32), io.op);
- if (!a) {
- wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
- io.op);
- return -EINVAL;
- }
- /* operation */
- switch (io.op & wil_mmio_op_mask) {
- case wil_mmio_read:
- io.val = readl(a);
- need_copy = true;
- break;
- case wil_mmio_write:
- writel(io.val, a);
- wmb(); /* make sure write propagated to HW */
- break;
- default:
- wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
- return -EINVAL;
- }
-
- if (need_copy) {
- wil_dbg_ioctl(wil, "IO done: addr = 0x%08x"
- " val = 0x%08x op = 0x%08x\n",
- io.addr, io.val, io.op);
- if (copy_to_user(data, &io, sizeof(io)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int wil_ioc_memio_block(struct wil6210_priv *wil, void __user *data)
-{
- struct wil_memio_block io;
- void *block;
- void __iomem *a;
- int rc = 0;
-
- if (copy_from_user(&io, data, sizeof(io)))
- return -EFAULT;
-
- wil_dbg_ioctl(wil, "IO: addr = 0x%08x size = 0x%08x op = 0x%08x\n",
- io.addr, io.size, io.op);
-
- /* size */
- if (io.size % 4) {
- wil_err(wil, "size is not multiple of 4: 0x%08x\n", io.size);
- return -EINVAL;
- }
-
- a = wil_ioc_addr(wil, io.addr, io.size, io.op);
- if (!a) {
- wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
- io.op);
- return -EINVAL;
- }
-
- block = kmalloc(io.size, GFP_USER);
- if (!block)
- return -ENOMEM;
-
- /* operation */
- switch (io.op & wil_mmio_op_mask) {
- case wil_mmio_read:
- wil_memcpy_fromio_32(block, a, io.size);
- wil_hex_dump_ioctl("Read ", block, io.size);
- if (copy_to_user(io.block, block, io.size)) {
- rc = -EFAULT;
- goto out_free;
- }
- break;
- case wil_mmio_write:
- if (copy_from_user(block, io.block, io.size)) {
- rc = -EFAULT;
- goto out_free;
- }
- wil_memcpy_toio_32(a, block, io.size);
- wmb(); /* make sure write propagated to HW */
- wil_hex_dump_ioctl("Write ", block, io.size);
- break;
- default:
- wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
- rc = -EINVAL;
- break;
- }
-
-out_free:
- kfree(block);
- return rc;
-}
-
-int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
-{
- int ret;
-
- switch (cmd) {
- case WIL_IOCTL_MEMIO:
- ret = wil_ioc_memio_dword(wil, data);
- break;
- case WIL_IOCTL_MEMIO_BLOCK:
- ret = wil_ioc_memio_block(wil, data);
- break;
- default:
- wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
- return -ENOIOCTLCMD;
- }
-
- wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
- return ret;
-}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 32086792dfc3..daf944a71901 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -576,6 +576,9 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
+ wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
+ WMI_WAKEUP_TRIGGER_BCAST;
+
return 0;
out_wmi_wq:
@@ -586,8 +589,10 @@ out_wmi_wq:
void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
{
- if (wil->platform_ops.bus_request)
+ if (wil->platform_ops.bus_request) {
+ wil->bus_request_kbps = kbps;
wil->platform_ops.bus_request(wil->platform_handle, kbps);
+ }
}
/**
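
wil6210_bus_request() now records the last requested bandwidth in wil->bus_request_kbps; the suspend path in pm.c below saves that value and restores it on resume instead of falling back to a default. The caching wrapper, as a sketch with hypothetical names:

static void demo_bus_request(struct demo_priv *dp, u32 kbps)
{
	if (dp->ops.bus_request) {
		dp->last_kbps = kbps;	/* remembered for resume */
		dp->ops.bus_request(dp->handle, kbps);
	}
}
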
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 708facd5f667..4a6ab2d0fdf1 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -42,20 +42,12 @@ static int wil_stop(struct net_device *ndev)
return wil_down(wil);
}
-static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-{
- struct wil6210_priv *wil = ndev_to_wil(ndev);
-
- return wil_ioctl(wil, ifr->ifr_data, cmd);
-}
-
static const struct net_device_ops wil_netdev_ops = {
.ndo_open = wil_open,
.ndo_stop = wil_stop,
.ndo_start_xmit = wil_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = wil_do_ioctl,
};
static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index b38515fc7ce7..d571feb2370e 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -26,6 +26,10 @@ static bool use_msi = true;
module_param(use_msi, bool, 0444);
MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
+static bool ftm_mode;
+module_param(ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
+
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int wil6210_pm_notify(struct notifier_block *notify_block,
@@ -36,13 +40,15 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
+ const char *wil_fw_name;
u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
RGF_USER_REVISION_ID_MASK);
bitmap_zero(wil->hw_capabilities, hw_capability_last);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
- wil->wil_fw_name = WIL_FW_NAME_DEFAULT;
+ wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
+ WIL_FW_NAME_DEFAULT;
wil->chip_revision = chip_revision;
switch (jtag_id) {
@@ -51,9 +57,11 @@ void wil_set_capabilities(struct wil6210_priv *wil)
case REVISION_ID_SPARROW_D0:
wil->hw_name = "Sparrow D0";
wil->hw_version = HW_VER_SPARROW_D0;
- if (wil_fw_verify_file_exists(wil,
- WIL_FW_NAME_SPARROW_PLUS))
- wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;
+ wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_SPARROW_PLUS :
+ WIL_FW_NAME_SPARROW_PLUS;
+
+ if (wil_fw_verify_file_exists(wil, wil_fw_name))
+ wil->wil_fw_name = wil_fw_name;
break;
case REVISION_ID_SPARROW_B0:
wil->hw_name = "Sparrow B0";
@@ -104,8 +112,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only);
- pdev->msi_enabled = 0;
-
pci_set_master(pdev);
wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
@@ -183,6 +189,13 @@ static int wil_platform_rop_fw_recovery(void *wil_handle)
return 0;
}
+static void wil_platform_ops_uninit(struct wil6210_priv *wil)
+{
+ if (wil->platform_ops.uninit)
+ wil->platform_ops.uninit(wil->platform_handle);
+ memset(&wil->platform_ops, 0, sizeof(wil->platform_ops));
+}
+
static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct wil6210_priv *wil;
@@ -192,16 +205,18 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.ramdump = wil_platform_rop_ramdump,
.fw_recovery = wil_platform_rop_fw_recovery,
};
+ u32 bar_size = pci_resource_len(pdev, 0);
/* check HW */
dev_info(&pdev->dev, WIL_NAME
- " device found [%04x:%04x] (rev %x)\n",
- (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
-
- if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
- dev_err(&pdev->dev, "Not " WIL_NAME "? "
- "BAR0 size is %lu while expecting %lu\n",
- (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+ " device found [%04x:%04x] (rev %x) bar size 0x%x\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision,
+ bar_size);
+
+ if ((bar_size < WIL6210_MIN_MEM_SIZE) ||
+ (bar_size > WIL6210_MAX_MEM_SIZE)) {
+ dev_err(&pdev->dev, "Unexpected BAR0 size 0x%x\n",
+ bar_size);
return -ENODEV;
}
@@ -214,6 +229,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil->pdev = pdev;
pci_set_drvdata(pdev, wil);
+ wil->bar_size = bar_size;
/* rollback to if_free */
wil->platform_handle =
@@ -241,7 +257,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
rc = pci_enable_device(pdev);
- if (rc) {
+ if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
"pci_enable_device failed, retry with MSI only\n");
/* Work around for platforms that can't allocate IRQ:
@@ -256,6 +272,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_plat;
}
/* rollback to err_disable_pdev */
+ pci_set_power_state(pdev, PCI_D0);
rc = pci_request_region(pdev, 0, WIL_NAME);
if (rc) {
@@ -276,6 +293,15 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil_set_capabilities(wil);
wil6210_clear_irq(wil);
+ wil->keep_radio_on_during_sleep =
+ wil->platform_ops.keep_radio_on_during_sleep &&
+ wil->platform_ops.keep_radio_on_during_sleep(
+ wil->platform_handle) &&
+ test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+ wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+ wil->keep_radio_on_during_sleep);
+
/* FW should raise IRQ when ready */
rc = wil_if_pcie_enable(wil);
if (rc) {
@@ -316,8 +342,7 @@ err_release_reg:
err_disable_pdev:
pci_disable_device(pdev);
err_plat:
- if (wil->platform_ops.uninit)
- wil->platform_ops.uninit(wil->platform_handle);
+ wil_platform_ops_uninit(wil);
if_free:
wil_if_free(wil);
@@ -346,8 +371,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_iounmap(pdev, csr);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
- if (wil->platform_ops.uninit)
- wil->platform_ops.uninit(wil->platform_handle);
+ wil_platform_ops_uninit(wil);
wil_if_free(wil);
}
@@ -374,15 +398,16 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
goto out;
rc = wil_suspend(wil, is_runtime);
- if (rc)
- goto out;
-
- /* TODO: how do I bring card in low power state? */
-
- /* disable bus mastering */
- pci_clear_master(pdev);
- /* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
+ if (!rc) {
+ wil->suspend_stats.successful_suspends++;
+ /* If platform device supports keep_radio_on_during_sleep
+ * it will control PCIe master
+ */
+ if (!wil->keep_radio_on_during_sleep)
+ /* disable bus mastering */
+ pci_clear_master(pdev);
+ }
out:
return rc;
}
@@ -395,12 +420,21 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
- /* allow master */
- pci_set_master(pdev);
-
+ /* If platform device supports keep_radio_on_during_sleep it will
+ * control PCIe master
+ */
+ if (!wil->keep_radio_on_during_sleep)
+ /* allow master */
+ pci_set_master(pdev);
rc = wil_resume(wil, is_runtime);
- if (rc)
- pci_clear_master(pdev);
+ if (rc) {
+ wil_err(wil, "device failed to resume (%d)\n", rc);
+ wil->suspend_stats.failed_resumes++;
+ if (!wil->keep_radio_on_during_sleep)
+ pci_clear_master(pdev);
+ } else {
+ wil->suspend_stats.successful_resumes++;
+ }
return rc;
}
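
keep_radio_on_during_sleep is derived once at probe time: the optional platform callback must exist and return true, and the firmware must advertise WMI_FW_CAPABILITY_D3_SUSPEND. The guarded-optional-callback pattern, as a sketch (DEMO_* and demo_* names are hypothetical):

#include <linux/bitops.h>

enum { DEMO_FW_CAP_D3_SUSPEND = 8 };	/* mirrors the wmi.h bit */

struct demo_platform_ops {
	bool (*keep_radio_on_during_sleep)(void *handle);	/* optional */
};

static bool demo_keep_radio_on(const struct demo_platform_ops *ops,
			       void *handle, const unsigned long *fw_caps)
{
	/* short-circuit so an unset optional callback is never called */
	return ops->keep_radio_on_during_sleep &&
	       ops->keep_radio_on_during_sleep(handle) &&
	       test_bit(DEMO_FW_CAP_D3_SUSPEND, fw_caps);
}
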
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 2ae4fe85cc8c..ce1f384e7f8e 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -15,6 +15,7 @@
*/
#include "wil6210.h"
+#include <linux/jiffies.h>
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
{
@@ -61,20 +62,170 @@ out:
wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
+ if (rc)
+ wil->suspend_stats.rejected_by_host++;
+
return rc;
}
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+static int wil_resume_keep_radio_on(struct wil6210_priv *wil)
{
int rc = 0;
- struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
+ /* wil_status_resuming will be cleared when getting
+ * WMI_TRAFFIC_RESUME_EVENTID
+ */
+ set_bit(wil_status_resuming, wil->status);
+ clear_bit(wil_status_suspended, wil->status);
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wil_unmask_irq(wil);
- if (test_bit(wil_status_suspended, wil->status)) {
- wil_dbg_pm(wil, "trying to suspend while suspended\n");
- return 0;
+ wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend);
+
+ /* Send WMI resume request to the device */
+ rc = wmi_resume(wil);
+ if (rc) {
+ wil_err(wil, "device failed to resume (%d), resetting\n", rc);
+ rc = wil_down(wil);
+ if (rc) {
+ wil_err(wil, "wil_down failed (%d)\n", rc);
+ goto out;
+ }
+ rc = wil_up(wil);
+ if (rc) {
+ wil_err(wil, "wil_up failed (%d)\n", rc);
+ goto out;
+ }
+ }
+
+ /* Wake all queues */
+ if (test_bit(wil_status_fwconnected, wil->status))
+ wil_update_net_queues_bh(wil, NULL, false);
+
+out:
+ if (rc)
+ set_bit(wil_status_suspended, wil->status);
+ return rc;
+}
+
+static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
+{
+ int rc = 0;
+ unsigned long start, data_comp_to;
+
+ wil_dbg_pm(wil, "suspend keep radio on\n");
+
+ /* Prevent handling of new tx and wmi commands */
+ set_bit(wil_status_suspending, wil->status);
+ wil_update_net_queues_bh(wil, NULL, true);
+
+ if (!wil_is_tx_idle(wil)) {
+ wil_dbg_pm(wil, "Pending TX data, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ if (!wil_is_rx_idle(wil)) {
+ wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ if (!wil_is_wmi_idle(wil)) {
+ wil_dbg_pm(wil, "Pending WMI events, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ /* Send WMI suspend request to the device */
+ rc = wmi_suspend(wil);
+ if (rc) {
+ wil_dbg_pm(wil, "wmi_suspend failed, reject suspend (%d)\n",
+ rc);
+ goto reject_suspend;
+ }
+
+ /* Wait for completion of the pending RX packets */
+ start = jiffies;
+ data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
+ if (test_bit(wil_status_napi_en, wil->status)) {
+ while (!wil_is_rx_idle(wil)) {
+ if (time_after(jiffies, data_comp_to)) {
+ if (wil_is_rx_idle(wil))
+ break;
+ wil_err(wil,
+ "TO waiting for idle RX, suspend failed\n");
+ wil->suspend_stats.failed_suspends++;
+ goto resume_after_fail;
+ }
+ wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
+ napi_synchronize(&wil->napi_rx);
+ msleep(20);
+ }
+ }
+
+ /* In case of pending WMI events, reject the suspend
+ * and resume the device.
+ * This can happen if the device sent the WMI events before
+ * approving the suspend.
+ */
+ if (!wil_is_wmi_idle(wil)) {
+ wil_err(wil, "suspend failed due to pending WMI events\n");
+ wil->suspend_stats.failed_suspends++;
+ goto resume_after_fail;
+ }
+
+ wil_mask_irq(wil);
+
+ /* Disable device reset on PERST */
+ wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+
+ if (wil->platform_ops.suspend) {
+ rc = wil->platform_ops.suspend(wil->platform_handle, true);
+ if (rc) {
+ wil_err(wil, "platform device failed to suspend (%d)\n",
+ rc);
+ wil->suspend_stats.failed_suspends++;
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wil_unmask_irq(wil);
+ goto resume_after_fail;
+ }
+ }
+
+ /* Save the current bus request to return to the same in resume */
+ wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
+ wil6210_bus_request(wil, 0);
+
+ set_bit(wil_status_suspended, wil->status);
+ clear_bit(wil_status_suspending, wil->status);
+
+ return rc;
+
+resume_after_fail:
+ set_bit(wil_status_resuming, wil->status);
+ clear_bit(wil_status_suspending, wil->status);
+ rc = wmi_resume(wil);
+ /* if resume succeeded, reject the suspend */
+ if (!rc) {
+ rc = -EBUSY;
+ if (test_bit(wil_status_fwconnected, wil->status))
+ wil_update_net_queues_bh(wil, NULL, false);
}
+ return rc;
+
+reject_suspend:
+ clear_bit(wil_status_suspending, wil->status);
+ if (test_bit(wil_status_fwconnected, wil->status))
+ wil_update_net_queues_bh(wil, NULL, false);
+ return -EBUSY;
+}
+
+static int wil_suspend_radio_off(struct wil6210_priv *wil)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_pm(wil, "suspend radio off\n");
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
@@ -90,7 +241,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_disable_irq(wil);
if (wil->platform_ops.suspend) {
- rc = wil->platform_ops.suspend(wil->platform_handle);
+ rc = wil->platform_ops.suspend(wil->platform_handle, false);
if (rc) {
wil_enable_irq(wil);
goto out;
@@ -100,6 +251,50 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
set_bit(wil_status_suspended, wil->status);
out:
+ wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
+
+ return rc;
+}
+
+static int wil_resume_radio_off(struct wil6210_priv *wil)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
+ wil_enable_irq(wil);
+ /* if netif up, bring hardware up
+ * During open(), IFF_UP is set after the actual device method
+ * invocation. This prevents a recursive call to wil_up().
+ * wil_status_suspended will be cleared in wil_reset.
+ */
+ if (ndev->flags & IFF_UP)
+ rc = wil_up(wil);
+ else
+ clear_bit(wil_status_suspended, wil->status);
+
+ return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
+
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 0;
+ }
+
+ if (!keep_radio_on)
+ rc = wil_suspend_radio_off(wil);
+ else
+ rc = wil_suspend_keep_radio_on(wil);
+
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
@@ -110,29 +305,24 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
if (wil->platform_ops.resume) {
- rc = wil->platform_ops.resume(wil->platform_handle);
+ rc = wil->platform_ops.resume(wil->platform_handle,
+ keep_radio_on);
if (rc) {
wil_err(wil, "platform_ops.resume : %d\n", rc);
goto out;
}
}
- wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
- wil_enable_irq(wil);
-
- /* if netif up, bring hardware up
- * During open(), IFF_UP set after actual device method
- * invocation. This prevent recursive call to wil_up().
- * wil_status_suspended will be cleared in wil_reset
- */
- if (ndev->flags & IFF_UP)
- rc = wil_up(wil);
+ if (keep_radio_on)
+ rc = wil_resume_keep_radio_on(wil);
else
- clear_bit(wil_status_suspended, wil->status);
+ rc = wil_resume_radio_off(wil);
out:
wil_dbg_pm(wil, "resume: %s => %d\n",
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 84d91606e6f3..ec57bcce9601 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -104,6 +104,51 @@ static inline int wil_vring_avail_high(struct vring *vring)
return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
}
+/* returns true when all tx vrings are empty */
+bool wil_is_tx_idle(struct wil6210_priv *wil)
+{
+ int i;
+ unsigned long data_comp_to;
+
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ struct vring *vring = &wil->vring_tx[i];
+ int vring_index = vring - wil->vring_tx;
+ struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+
+ spin_lock(&txdata->lock);
+
+ if (!vring->va || !txdata->enabled) {
+ spin_unlock(&txdata->lock);
+ continue;
+ }
+
+ data_comp_to = jiffies + msecs_to_jiffies(
+ WIL_DATA_COMPLETION_TO_MS);
+ if (test_bit(wil_status_napi_en, wil->status)) {
+ while (!wil_vring_is_empty(vring)) {
+ if (time_after(jiffies, data_comp_to)) {
+ wil_dbg_pm(wil,
+ "TO waiting for idle tx\n");
+ spin_unlock(&txdata->lock);
+ return false;
+ }
+ wil_dbg_ratelimited(wil,
+ "tx vring is not empty -> NAPI\n");
+ spin_unlock(&txdata->lock);
+ napi_synchronize(&wil->napi_tx);
+ msleep(20);
+ spin_lock(&txdata->lock);
+ if (!vring->va || !txdata->enabled)
+ break;
+ }
+ }
+
+ spin_unlock(&txdata->lock);
+ }
+
+ return true;
+}
+
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
@@ -406,6 +451,18 @@ static inline int wil_is_back_req(u8 fc)
(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}
+bool wil_is_rx_idle(struct wil6210_priv *wil)
+{
+ struct vring_rx_desc *_d;
+ struct vring *vring = &wil->vring_rx;
+
+ _d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
+ if (_d->dma.status & RX_DMA_STATUS_DU)
+ return false;
+
+ return true;
+}
+
/**
* reap 1 frame from @swhead
*
@@ -1812,6 +1869,15 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
spin_lock(&txdata->lock);
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resuming, wil->status)) {
+ wil_dbg_txrx(wil,
+ "suspend/resume in progress. drop packet\n");
+ spin_unlock(&txdata->lock);
+ return -EINVAL;
+ }
+
rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
(wil, vring, skb);
@@ -1864,6 +1930,11 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
return;
}
+ /* Do not wake the queues in suspend flow */
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status))
+ return;
+
/* check wake */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
struct vring *cur_vring = &wil->vring_tx[i];
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index b00c803a1e83..d085ccfc7228 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -37,8 +37,13 @@ extern bool debug_fw;
extern bool disable_ap_sme;
#define WIL_NAME "wil6210"
-#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */
-#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
+
+#define WIL_FW_NAME_DEFAULT "wil6210.fw"
+#define WIL_FW_NAME_FTM_DEFAULT "wil6210_ftm.fw"
+
+#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw"
+#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw"
+
#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
@@ -53,7 +58,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
}
-#define WIL6210_MEM_SIZE (2*1024*1024UL)
+#define WIL6210_MIN_MEM_SIZE (2 * 1024 * 1024UL)
+#define WIL6210_MAX_MEM_SIZE (4 * 1024 * 1024UL)
#define WIL_TX_Q_LEN_DEFAULT (4000)
#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
@@ -77,6 +83,15 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
*/
#define WIL_MAX_MPDU_OVERHEAD (62)
+struct wil_suspend_stats {
+ unsigned long successful_suspends;
+ unsigned long failed_suspends;
+ unsigned long successful_resumes;
+ unsigned long failed_resumes;
+ unsigned long rejected_by_device;
+ unsigned long rejected_by_host;
+};
+
/* Calculate MAC buffer size for the firmware. It includes all overhead,
* as it will go over the air, and need to be 8 byte aligned
*/
@@ -284,6 +299,8 @@ enum {
#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1)
#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3)
+#define WIL_DATA_COMPLETION_TO_MS 200
+
/* Hardware definitions end */
struct fw_map {
u32 from; /* linker address - from, inclusive */
@@ -412,7 +429,9 @@ enum { /* for wil6210_priv.status */
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
+ wil_status_suspending, /* suspend in progress */
wil_status_suspended, /* suspend completed, device is suspended */
+ wil_status_resuming, /* resume in progress */
wil_status_last /* keep last */
};
@@ -594,6 +613,7 @@ extern u8 led_polarity;
struct wil6210_priv {
struct pci_dev *pdev;
+ u32 bar_size;
struct wireless_dev *wdev;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
@@ -676,9 +696,12 @@ struct wil6210_priv {
struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
u8 discovery_mode;
u8 abft_len;
+ u8 wakeup_trigger;
+ struct wil_suspend_stats suspend_stats;
void *platform_handle;
struct wil_platform_ops platform_ops;
+ bool keep_radio_on_during_sleep;
struct pmc_ctx pmc;
@@ -701,6 +724,11 @@ struct wil6210_priv {
struct notifier_block pm_notify;
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */
+
+ bool suspend_resp_rcvd;
+ bool suspend_resp_comp;
+ u32 bus_request_kbps;
+ u32 bus_request_kbps_pre_suspend;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -949,7 +977,6 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
-int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
bool load);
bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
@@ -957,6 +984,11 @@ bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+bool wil_is_wmi_idle(struct wil6210_priv *wil);
+int wmi_resume(struct wil6210_priv *wil);
+int wmi_suspend(struct wil6210_priv *wil);
+bool wil_is_tx_idle(struct wil6210_priv *wil);
+bool wil_is_rx_idle(struct wil6210_priv *wil);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index f8c41172a3f4..5d9e4bfcb045 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -33,10 +33,11 @@ enum wil_platform_event {
*/
struct wil_platform_ops {
int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */);
- int (*suspend)(void *handle);
- int (*resume)(void *handle);
+ int (*suspend)(void *handle, bool keep_device_power);
+ int (*resume)(void *handle, bool device_powered_on);
void (*uninit)(void *handle);
int (*notify)(void *handle, enum wil_platform_event evt);
+ bool (*keep_radio_on_during_sleep)(void *handle);
};
/**
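
The platform ops grow a bool on both suspend (keep_device_power) and resume (device_powered_on), plus an optional capability query. A no-op implementation satisfying the new signatures might look like this; it is purely hypothetical glue, not a real platform backend:

#include <linux/types.h>

static int demo_plat_suspend(void *handle, bool keep_device_power)
{
	/* keep_device_power: leave the rails up so the radio can wake */
	return 0;
}

static int demo_plat_resume(void *handle, bool device_powered_on)
{
	/* device_powered_on mirrors what suspend was told */
	return 0;
}

static bool demo_plat_keep_radio_on(void *handle)
{
	return true;	/* this platform can keep the radio in sleep */
}
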
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 0a5020f31de1..65ef67321fc0 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -37,6 +37,8 @@ module_param(led_id, byte, 0444);
MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
+#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+
/**
* WMI event receiving - theory of operations
*
@@ -157,7 +159,7 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
return NULL;
off = HOSTADDR(ptr);
- if (off > WIL6210_MEM_SIZE - 4)
+ if (off > wil->bar_size - 4)
return NULL;
return wil->csr + off;
@@ -177,7 +179,7 @@ void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
return NULL;
off = HOSTADDR(ptr);
- if (off > WIL6210_MEM_SIZE - 4)
+ if (off > wil->bar_size - 4)
return NULL;
return wil->csr + off;
@@ -233,6 +235,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
return -EAGAIN;
}
+ /* Allow sending only suspend / resume commands during suspend flow */
+ if ((test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resuming, wil->status)) &&
+ ((cmdid != WMI_TRAFFIC_SUSPEND_CMDID) &&
+ (cmdid != WMI_TRAFFIC_RESUME_CMDID))) {
+ wil_err(wil, "WMI: reject send_command during suspend\n");
+ return -EINVAL;
+ }
+
if (!head) {
wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
return -EINVAL;
@@ -862,6 +874,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
return;
}
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_err(wil, "suspended. cannot handle WMI event\n");
+ return;
+ }
+
for (n = 0;; n++) {
u16 len;
bool q;
@@ -914,6 +931,15 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
struct wmi_cmd_hdr *wmi = &evt->event.wmi;
u16 id = le16_to_cpu(wmi->command_id);
u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
+ if (test_bit(wil_status_resuming, wil->status)) {
+ if (id == WMI_TRAFFIC_RESUME_EVENTID)
+ clear_bit(wil_status_resuming,
+ wil->status);
+ else
+ wil_err(wil,
+ "WMI evt %d while resuming\n",
+ id);
+ }
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
if (wil->reply_id && wil->reply_id == id) {
if (wil->reply_buf) {
@@ -921,6 +947,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
min(len, wil->reply_size));
immed_reply = true;
}
+ if (id == WMI_TRAFFIC_SUSPEND_EVENTID) {
+ wil_dbg_wmi(wil,
+ "set suspend_resp_rcvd\n");
+ wil->suspend_resp_rcvd = true;
+ }
}
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
@@ -1762,6 +1793,85 @@ void wmi_event_flush(struct wil6210_priv *wil)
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
}
+int wmi_suspend(struct wil6210_priv *wil)
+{
+ int rc;
+ struct wmi_traffic_suspend_cmd cmd = {
+ .wakeup_trigger = wil->wakeup_trigger,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_traffic_suspend_event evt;
+ } __packed reply;
+ u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP;
+
+ wil->suspend_resp_rcvd = false;
+ wil->suspend_resp_comp = false;
+
+ reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
+
+ rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
+ WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
+ suspend_to);
+ if (rc) {
+ wil_err(wil, "wmi_call for suspend req failed, rc=%d\n", rc);
+ if (rc == -ETIME)
+ /* wmi_call TO */
+ wil->suspend_stats.rejected_by_device++;
+ else
+ wil->suspend_stats.rejected_by_host++;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil, "waiting for suspend_response_completed\n");
+
+ rc = wait_event_interruptible_timeout(wil->wq,
+ wil->suspend_resp_comp,
+ msecs_to_jiffies(suspend_to));
+ if (rc == 0) {
+ wil_err(wil, "TO waiting for suspend_response_completed\n");
+ if (wil->suspend_resp_rcvd)
+ /* Device responded but we TO due to another reason */
+ wil->suspend_stats.rejected_by_host++;
+ else
+ wil->suspend_stats.rejected_by_device++;
+ rc = -EBUSY;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
+ if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
+ wil_dbg_pm(wil, "device rejected the suspend\n");
+ wil->suspend_stats.rejected_by_device++;
+ }
+ rc = reply.evt.status;
+
+out:
+ wil->suspend_resp_rcvd = false;
+ wil->suspend_resp_comp = false;
+
+ return rc;
+}
+
+int wmi_resume(struct wil6210_priv *wil)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_traffic_resume_event evt;
+ } __packed reply;
+
+ reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
+
+ rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
+ WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
+ WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
+ if (rc)
+ return rc;
+
+ return reply.evt.status;
+}
+
static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
void *d, int len)
{
@@ -1851,3 +1961,36 @@ void wmi_event_worker(struct work_struct *work)
}
wil_dbg_wmi(wil, "event_worker: Finished\n");
}
+
+bool wil_is_wmi_idle(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+ bool rc = false;
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ /* Check if there are pending WMI events in the events queue */
+ if (!list_empty(&wil->pending_wmi_ev)) {
+ wil_dbg_pm(wil, "Pending WMI events in queue\n");
+ goto out;
+ }
+
+ /* Check if there is a pending WMI call */
+ if (wil->reply_id) {
+ wil_dbg_pm(wil, "Pending WMI call\n");
+ goto out;
+ }
+
+ /* Check if there are pending RX events in mbox */
+ r->head = wil_r(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
+ if (r->tail != r->head)
+ wil_dbg_pm(wil, "Pending WMI mbox events\n");
+ else
+ rc = true;
+
+out:
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ return rc;
+}
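
Taken together, the helpers above make up the WMI side of the wil6210
suspend flow. A minimal sketch of how a power-management path might
sequence them (the wrapper function and its error policy are
illustrative, not part of the patch):

	/* illustrative wrapper, not taken from the patch */
	static int wil_try_wmi_suspend(struct wil6210_priv *wil)
	{
		int rc;

		/* refuse to suspend while WMI traffic is in flight */
		if (!wil_is_wmi_idle(wil))
			return -EBUSY;

		/* 0 on approval, fw status or -errno otherwise */
		rc = wmi_suspend(wil);
		if (rc)
			return rc;

		set_bit(wil_status_suspended, wil->status);
		return 0;
	}
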
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index f7f5f4f801e3..256f63c57da0 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -59,6 +59,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_DISABLE_AP_SME = 4,
WMI_FW_CAPABILITY_WMI_ONLY = 5,
WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7,
+ WMI_FW_CAPABILITY_D3_SUSPEND = 8,
WMI_FW_CAPABILITY_MAX,
};
@@ -157,7 +158,7 @@ enum wmi_command_id {
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
/* Power management */
- WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
+ WMI_TRAFFIC_SUSPEND_CMDID = 0x904,
WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
WMI_P2P_CFG_CMDID = 0x910,
@@ -500,8 +501,14 @@ struct wmi_port_delete_cmd {
u8 reserved[3];
} __packed;
-/* WMI_TRAFFIC_DEFERRAL_CMDID */
-struct wmi_traffic_deferral_cmd {
+/* WMI_TRAFFIC_SUSPEND_CMD wakeup trigger bit mask values */
+enum wmi_wakeup_trigger {
+ WMI_WAKEUP_TRIGGER_UCAST = 0x01,
+ WMI_WAKEUP_TRIGGER_BCAST = 0x02,
+};
+
+/* WMI_TRAFFIC_SUSPEND_CMDID */
+struct wmi_traffic_suspend_cmd {
/* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
u8 wakeup_trigger;
} __packed;
@@ -1084,7 +1091,7 @@ enum wmi_event_id {
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
/* Power management */
- WMI_TRAFFIC_DEFERRAL_EVENTID = 0x1904,
+ WMI_TRAFFIC_SUSPEND_EVENTID = 0x1904,
WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
/* P2P */
WMI_P2P_CFG_DONE_EVENTID = 0x1910,
@@ -1926,14 +1933,14 @@ struct wmi_link_maintain_cfg_read_done_event {
struct wmi_link_maintain_cfg lm_cfg;
} __packed;
-enum wmi_traffic_deferral_status {
- WMI_TRAFFIC_DEFERRAL_APPROVED = 0x0,
- WMI_TRAFFIC_DEFERRAL_REJECTED = 0x1,
+enum wmi_traffic_suspend_status {
+ WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
+ WMI_TRAFFIC_SUSPEND_REJECTED = 0x1,
};
-/* WMI_TRAFFIC_DEFERRAL_EVENTID */
-struct wmi_traffic_deferral_event {
- /* enum wmi_traffic_deferral_status_e */
+/* WMI_TRAFFIC_SUSPEND_EVENTID */
+struct wmi_traffic_suspend_event {
+ /* enum wmi_traffic_suspend_status_e */
u8 status;
} __packed;
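
The wakeup_trigger field is a bit mask built from enum
wmi_wakeup_trigger. For example, a host that wants the device to wake
on both unicast and broadcast traffic would fill the command like this
(illustration only):

	struct wmi_traffic_suspend_cmd cmd = {
		.wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
				  WMI_WAKEUP_TRIGGER_BCAST,
	};
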
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 9b970dc2b922..984c1d0560b1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -108,12 +108,14 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
int ret = 0;
u8 data;
u32 addr, gpiocontrol;
- unsigned long flags;
pdata = &sdiodev->settings->bus.sdio;
if (pdata->oob_irq_supported) {
brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
pdata->oob_irq_nr);
+ spin_lock_init(&sdiodev->irq_en_lock);
+ sdiodev->irq_en = true;
+
ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
pdata->oob_irq_flags, "brcmf_oob_intr",
&sdiodev->func[1]->dev);
@@ -122,10 +124,6 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
return ret;
}
sdiodev->oob_irq_requested = true;
- spin_lock_init(&sdiodev->irq_en_lock);
- spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
- sdiodev->irq_en = true;
- spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
ret = enable_irq_wake(pdata->oob_irq_nr);
if (ret != 0) {
@@ -706,7 +704,7 @@ done:
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktq, uint totlen)
{
- struct sk_buff *glom_skb;
+ struct sk_buff *glom_skb = NULL;
struct sk_buff *skb;
u32 addr = sdiodev->sbwad;
int err = 0;
@@ -727,10 +725,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
return -ENOMEM;
err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
glom_skb);
- if (err) {
- brcmu_pkt_buf_free_skb(glom_skb);
+ if (err)
goto done;
- }
skb_queue_walk(pktq, skb) {
memcpy(skb->data, glom_skb->data, skb->len);
@@ -741,6 +737,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
pktq);
done:
+ brcmu_pkt_buf_free_skb(glom_skb);
return err;
}
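
The reshuffle above works because glom_skb now starts out NULL and
brcmu_pkt_buf_free_skb() tolerates a NULL argument, so every exit path
can funnel through the single done: label. The same centralized-cleanup
idiom as standalone C (names invented; free() is NULL-safe just like
the skb helper):

	#include <stdio.h>
	#include <stdlib.h>

	static int demo_recv(size_t totlen)
	{
		char *glom = NULL;
		int err = 0;

		if (totlen > 4096) {	/* aggregation not needed */
			err = -1;
			goto done;	/* glom is still NULL here */
		}

		glom = malloc(totlen);
		if (!glom)
			return -1;

		/* ... fill and consume the buffer ... */
	done:
		free(glom);		/* NULL-safe centralized cleanup */
		return err;
	}

	int main(void)
	{
		printf("%d %d\n", demo_recv(128), demo_recv(8192));
		return 0;
	}
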
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index b55c3293c4b4..163ddc49f951 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -113,6 +113,17 @@ struct brcmf_bus_msgbuf {
/**
+ * struct brcmf_bus_stats - bus statistic counters.
+ *
+ * @pktcowed: packets cowed for extra headroom/unorphan.
+ * @pktcow_failed: packets dropped due to failed cow-ing.
+ */
+struct brcmf_bus_stats {
+ atomic_t pktcowed;
+ atomic_t pktcow_failed;
+};
+
+/**
* struct brcmf_bus - interface structure between common and bus layer
*
* @bus_priv: pointer to private bus device.
@@ -120,11 +131,10 @@ struct brcmf_bus_msgbuf {
* @dev: device pointer of bus device.
* @drvr: public driver information.
* @state: operational state of the bus interface.
+ * @stats: statistics shared between common and bus layer.
* @maxctl: maximum size for rxctl request message.
- * @tx_realloc: number of tx packets realloced for headroom.
- * @dstats: dongle-based statistical data.
- * @dcmd_list: bus/device specific dongle initialization commands.
* @chip: device identifier of the dongle chip.
+ * @always_use_fws_queue: bus wants to use the queue also when fwsignal is inactive.
* @wowl_supported: is wowl supported by bus driver.
* @chiprev: revision of the dongle chip.
*/
@@ -138,8 +148,8 @@ struct brcmf_bus {
struct device *dev;
struct brcmf_pub *drvr;
enum brcmf_bus_state state;
+ struct brcmf_bus_stats stats;
uint maxctl;
- unsigned long tx_realloc;
u32 chip;
u32 chiprev;
bool always_use_fws_queue;
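
The counters are atomic_t because the TX path may run concurrently on
several CPUs, so updates and reads need no extra locking. A sketch of
the intended usage, assuming a bus pointer and a seq_file are in scope
(both hypothetical here):

	/* hot path: account a copy-on-write of a packet header */
	atomic_inc(&bus->stats.pktcowed);

	/* stats/debugfs path: read without taking any lock */
	seq_printf(seq, "pktcowed: %d\n",
		   atomic_read(&bus->stats.pktcowed));
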
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b8f042146fc9..898c7b024c15 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -625,6 +625,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
err = brcmf_net_attach(ifp, true);
if (err) {
brcmf_err("Registering netdevice failed\n");
+ free_netdev(ifp->ndev);
goto fail;
}
@@ -719,6 +720,8 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
{
struct brcmf_scan_params_le params_le;
struct cfg80211_scan_request *scan_request;
+ u64 reqid;
+ u32 bucket;
s32 err = 0;
brcmf_dbg(SCAN, "Enter\n");
@@ -749,7 +752,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
&params_le, sizeof(params_le));
if (err)
- brcmf_err("Scan abort failed\n");
+ brcmf_err("Scan abort failed\n");
}
brcmf_scan_config_mpc(ifp, 1);
@@ -758,11 +761,21 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
* e-scan can be initiated internally
* which takes precedence.
*/
- if (cfg->internal_escan) {
- brcmf_dbg(SCAN, "scheduled scan completed\n");
- cfg->internal_escan = false;
- if (!aborted)
- cfg80211_sched_scan_results(cfg_to_wiphy(cfg), 0);
+ if (cfg->int_escan_map) {
+ brcmf_dbg(SCAN, "scheduled scan completed (%x)\n",
+ cfg->int_escan_map);
+ while (cfg->int_escan_map) {
+ bucket = __ffs(cfg->int_escan_map);
+ cfg->int_escan_map &= ~BIT(bucket);
+ reqid = brcmf_pno_find_reqid_by_bucket(cfg->pno,
+ bucket);
+ if (!aborted) {
+ brcmf_dbg(SCAN, "report results: reqid=%llu\n",
+ reqid);
+ cfg80211_sched_scan_results(cfg_to_wiphy(cfg),
+ reqid);
+ }
+ }
} else if (scan_request) {
struct cfg80211_scan_info info = {
.aborted = aborted,
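
The while loop a few lines up drains cfg->int_escan_map one bucket at
a time: __ffs() yields the index of the lowest set bit, the bit is
cleared so the loop terminates, and the matching reqid is reported.
The same walk in isolation (handle_bucket() is a made-up stand-in):

	while (map) {
		u32 bucket = __ffs(map);	/* lowest set bit */

		map &= ~BIT(bucket);		/* clear it */
		handle_bucket(bucket);		/* hypothetical work */
	}
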
@@ -1011,7 +1024,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
if (!ssid_le.SSID_len)
brcmf_dbg(SCAN, "%d: Broadcast scan\n", i);
else
- brcmf_dbg(SCAN, "%d: scan for %s size =%d\n",
+ brcmf_dbg(SCAN, "%d: scan for %.32s size=%d\n",
i, ssid_le.SSID, ssid_le.SSID_len);
memcpy(ptr, &ssid_le, sizeof(ssid_le));
ptr += sizeof(ssid_le);
@@ -3011,7 +3024,7 @@ void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
struct escan_info *escan = &cfg->escan_info;
set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
- if (cfg->internal_escan || cfg->scan_request) {
+ if (cfg->int_escan_map || cfg->scan_request) {
escan->escan_state = WL_ESCAN_STATE_IDLE;
brcmf_notify_escan_complete(cfg, escan->ifp, true, true);
}
@@ -3034,7 +3047,7 @@ static void brcmf_escan_timeout(unsigned long data)
struct brcmf_cfg80211_info *cfg =
(struct brcmf_cfg80211_info *)data;
- if (cfg->internal_escan || cfg->scan_request) {
+ if (cfg->int_escan_map || cfg->scan_request) {
brcmf_err("timer expired\n");
schedule_work(&cfg->escan_timeout_work);
}
@@ -3120,7 +3133,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le))
goto exit;
- if (!cfg->internal_escan && !cfg->scan_request) {
+ if (!cfg->int_escan_map && !cfg->scan_request) {
brcmf_dbg(SCAN, "result without cfg80211 request\n");
goto exit;
}
@@ -3166,7 +3179,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
if (brcmf_p2p_scan_finding_common_channel(cfg, NULL))
goto exit;
- if (cfg->internal_escan || cfg->scan_request) {
+ if (cfg->int_escan_map || cfg->scan_request) {
brcmf_inform_bss(cfg);
aborted = status != BRCMF_E_STATUS_SUCCESS;
brcmf_notify_escan_complete(cfg, ifp, aborted, false);
@@ -3248,17 +3261,21 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
return 0;
}
-static int brcmf_start_internal_escan(struct brcmf_if *ifp,
+static int brcmf_start_internal_escan(struct brcmf_if *ifp, u32 fwmap,
struct cfg80211_scan_request *request)
{
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
int err;
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+ if (cfg->int_escan_map)
+ brcmf_dbg(SCAN, "aborting internal scan: map=%u\n",
+ cfg->int_escan_map);
/* Abort any on-going scan */
brcmf_abort_scanning(cfg);
}
+ brcmf_dbg(SCAN, "start internal scan: map=%u\n", fwmap);
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
cfg->escan_info.run = brcmf_run_escan;
err = brcmf_do_escan(ifp, request);
@@ -3266,7 +3283,7 @@ static int brcmf_start_internal_escan(struct brcmf_if *ifp,
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
return err;
}
- cfg->internal_escan = true;
+ cfg->int_escan_map = fwmap;
return 0;
}
@@ -3308,6 +3325,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
struct wiphy *wiphy = cfg_to_wiphy(cfg);
int i, err = 0;
struct brcmf_pno_scanresults_le *pfn_result;
+ u32 bucket_map;
u32 result_count;
u32 status;
u32 datalen;
@@ -3352,6 +3370,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
goto out_err;
}
+ bucket_map = 0;
for (i = 0; i < result_count; i++) {
netinfo = &netinfo_start[i];
@@ -3359,6 +3378,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n",
netinfo->SSID, netinfo->channel);
+ bucket_map |= brcmf_pno_get_bucket_map(cfg->pno, netinfo);
err = brcmf_internal_escan_add_info(request,
netinfo->SSID,
netinfo->SSID_len,
@@ -3367,7 +3387,10 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
goto out_err;
}
- err = brcmf_start_internal_escan(ifp, request);
+ if (!bucket_map)
+ goto free_req;
+
+ err = brcmf_start_internal_escan(ifp, bucket_map, request);
if (!err)
goto free_req;
@@ -3386,11 +3409,11 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
- brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
+ brcmf_dbg(SCAN, "Enter: n_match_sets=%d n_ssids=%d\n",
req->n_match_sets, req->n_ssids);
if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
- brcmf_err("Scanning suppressed: status (%lu)\n",
+ brcmf_err("Scanning suppressed: status=%lu\n",
cfg->scan_status);
return -EAGAIN;
}
@@ -3411,8 +3434,8 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
struct brcmf_if *ifp = netdev_priv(ndev);
brcmf_dbg(SCAN, "enter\n");
- brcmf_pno_clean(ifp);
- if (cfg->internal_escan)
+ brcmf_pno_stop_sched_scan(ifp, reqid);
+ if (cfg->int_escan_map)
brcmf_notify_escan_complete(cfg, ifp, true, true);
return 0;
}
@@ -6755,7 +6778,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
/* ignore non-ISO3166 country codes */
for (i = 0; i < sizeof(req->alpha2); i++)
if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
- brcmf_err("not a ISO3166 code (0x%02x 0x%02x)\n",
+ brcmf_err("not an ISO3166 code (0x%02x 0x%02x)\n",
req->alpha2[0], req->alpha2[1]);
return;
}
@@ -6839,7 +6862,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
if (!wiphy) {
brcmf_err("Could not allocate wiphy device\n");
- return NULL;
+ goto ops_out;
}
memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
set_wiphy_dev(wiphy, busdev);
@@ -6940,6 +6963,13 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
brcmf_p2p_detach(&cfg->p2p);
goto wiphy_unreg_out;
}
+ err = brcmf_pno_attach(cfg);
+ if (err) {
+ brcmf_err("PNO initialisation failed (%d)\n", err);
+ brcmf_btcoex_detach(cfg);
+ brcmf_p2p_detach(&cfg->p2p);
+ goto wiphy_unreg_out;
+ }
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS)) {
err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
@@ -6972,6 +7002,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
return cfg;
detach:
+ brcmf_pno_detach(cfg);
brcmf_btcoex_detach(cfg);
brcmf_p2p_detach(&cfg->p2p);
wiphy_unreg_out:
@@ -6982,6 +7013,7 @@ priv_out:
ifp->vif = NULL;
wiphy_out:
brcmf_free_wiphy(wiphy);
+ops_out:
kfree(ops);
return NULL;
}
@@ -6991,6 +7023,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
if (!cfg)
return;
+ brcmf_pno_detach(cfg);
brcmf_btcoex_detach(cfg);
wiphy_unregister(cfg->wiphy);
kfree(cfg->ops);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index a1c2e0af1774..12ff15446c6c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -273,7 +273,7 @@ struct brcmf_cfg80211_wowl {
* @pub: common driver information.
* @channel: current channel.
* @active_scan: current scan mode.
- * @internal_escan: indicates internally initiated e-scan is running.
+ * @int_escan_map: bucket map for which internal e-scan is done.
* @ibss_starter: indicates this sta is ibss starter.
* @pwr_save: indicate whether dongle to support power save mode.
* @dongle_up: indicate whether dongle up or not.
@@ -289,6 +289,7 @@ struct brcmf_cfg80211_wowl {
* @vif_cnt: number of vif instances.
* @vif_event: vif event signalling.
* @wowl: wowl related information.
+ * @pno: information of pno module.
*/
struct brcmf_cfg80211_info {
struct wiphy *wiphy;
@@ -305,7 +306,7 @@ struct brcmf_cfg80211_info {
struct brcmf_pub *pub;
u32 channel;
bool active_scan;
- bool internal_escan;
+ u32 int_escan_map;
bool ibss_starter;
bool pwr_save;
bool dongle_up;
@@ -322,6 +323,7 @@ struct brcmf_cfg80211_info {
struct brcmu_d11inf d11inf;
struct brcmf_assoclist_le assoclist;
struct brcmf_cfg80211_wowl wowl;
+ struct brcmf_pno_info *pno;
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 511d190c6cca..2153e8062b4c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -30,6 +30,7 @@
#include "debug.h"
#include "fwil_types.h"
#include "p2p.h"
+#include "pno.h"
#include "cfg80211.h"
#include "fwil.h"
#include "feature.h"
@@ -198,6 +199,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
struct ethhdr *eh;
+ int head_delta;
brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
@@ -210,13 +212,21 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
goto done;
}
- /* Make sure there's enough writable headroom*/
- ret = skb_cow_head(skb, drvr->hdrlen);
- if (ret < 0) {
- brcmf_err("%s: skb_cow_head failed\n",
- brcmf_ifname(ifp));
- dev_kfree_skb(skb);
- goto done;
+ /* Make sure there's enough writeable headroom */
+ if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
+ head_delta = drvr->hdrlen - skb_headroom(skb);
+
+ brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
+ brcmf_ifname(ifp), head_delta);
+ atomic_inc(&drvr->bus_if->stats.pktcowed);
+ ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0,
+ GFP_ATOMIC);
+ if (ret < 0) {
+ brcmf_err("%s: failed to expand headroom\n",
+ brcmf_ifname(ifp));
+ atomic_inc(&drvr->bus_if->stats.pktcow_failed);
+ goto done;
+ }
}
/* validate length for ether packet */
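
The new xmit code only reallocates when headroom is genuinely short or
the header is shared, and it rounds the growth up to NET_SKB_PAD so one
expansion covers later pushes as well. The same test-then-expand
pattern as a free-standing helper (a sketch, not part of the patch):

	/* ensure skb has 'needed' bytes of private, writable headroom */
	static int ensure_headroom(struct sk_buff *skb, unsigned int needed)
	{
		int delta = needed - skb_headroom(skb);

		if (delta <= 0 && !skb_header_cloned(skb))
			return 0;	/* enough room, not shared */

		if (delta < 0)
			delta = 0;	/* only the clone needs fixing */

		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	}
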
@@ -484,13 +494,13 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
goto fail;
}
+ ndev->priv_destructor = brcmf_cfg80211_free_netdev;
brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
return 0;
fail:
drvr->iflist[ifp->bsscfgidx] = NULL;
ndev->netdev_ops = NULL;
- free_netdev(ndev);
return -EBADE;
}
@@ -503,6 +513,7 @@ static void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
unregister_netdev(ndev);
} else {
brcmf_cfg80211_free_netdev(ndev);
+ free_netdev(ndev);
}
}
@@ -579,7 +590,6 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
fail:
ifp->drvr->iflist[ifp->bsscfgidx] = NULL;
ndev->netdev_ops = NULL;
- free_netdev(ndev);
return -EBADE;
}
@@ -625,7 +635,6 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
return ERR_PTR(-ENOMEM);
ndev->needs_free_netdev = true;
- ndev->priv_destructor = brcmf_cfg80211_free_netdev;
ifp = netdev_priv(ndev);
ifp->ndev = ndev;
/* store mapping ifidx to bsscfgidx */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index fe264a5798f1..35919d9e8e13 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -78,6 +78,7 @@ do { \
#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL)
#define BRCMF_FWCON_ON() (brcmf_msg_level & BRCMF_FWCON_VAL)
+#define BRCMF_SCAN_ON() (brcmf_msg_level & BRCMF_SCAN_VAL)
#else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
@@ -96,6 +97,7 @@ do { \
#define BRCMF_EVENT_ON() 0
#define BRCMF_FIL_ON() 0
#define BRCMF_FWCON_ON() 0
+#define BRCMF_SCAN_ON() 0
#endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index c7c1e9906500..d231042f19d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -442,7 +442,7 @@ struct brcmf_fw {
const char *nvram_name;
u16 domain_nr;
u16 bus_nr;
- void (*done)(struct device *dev, const struct firmware *fw,
+ void (*done)(struct device *dev, int err, const struct firmware *fw,
void *nvram_image, u32 nvram_len);
};
@@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
goto fail;
- fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+ fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
kfree(fwctx);
return;
fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
release_firmware(fwctx->code);
- device_release_driver(fwctx->dev);
+ fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
kfree(fwctx);
}
static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
{
struct brcmf_fw *fwctx = ctx;
- int ret;
+ int ret = 0;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
- if (!fw)
+ if (!fw) {
+ ret = -ENOENT;
goto fail;
-
- /* only requested code so done here */
- if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
- fwctx->done(fwctx->dev, fw, NULL, 0);
- kfree(fwctx);
- return;
}
+ /* only requested code so done here */
+ if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
+ goto done;
+
fwctx->code = fw;
ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
fwctx->dev, GFP_KERNEL, fwctx,
brcmf_fw_request_nvram_done);
- if (!ret)
- return;
-
- brcmf_fw_request_nvram_done(NULL, fwctx);
+ /* pass NULL to nvram callback for bcm47xx fallback */
+ if (ret)
+ brcmf_fw_request_nvram_done(NULL, fwctx);
return;
fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
- device_release_driver(fwctx->dev);
+done:
+ fwctx->done(fwctx->dev, ret, fw, NULL, 0);
kfree(fwctx);
}
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len),
u16 domain_nr, u16 bus_nr)
@@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len))
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index d3c9f0d52ae3..8fa4b7e1ab3d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram);
*/
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len),
u16 domain_nr, u16 bus_nr);
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 8c18fad3edc9..b857d539b9ac 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -806,6 +806,17 @@ struct brcmf_pno_macaddr_le {
};
/**
+ * struct brcmf_pno_bssid_le - bssid configuration for PNO scan.
+ *
+ * @bssid: BSS network identifier.
+ * @flags: flags for this BSSID.
+ */
+struct brcmf_pno_bssid_le {
+ u8 bssid[ETH_ALEN];
+ __le16 flags;
+};
+
+/**
* struct brcmf_pktcnt_le - packet counters.
*
* @rx_good_pkt: packets (MSDUs & MMPDUs) received from this station
@@ -835,15 +846,18 @@ struct brcmf_gtk_keyinfo_le {
u8 replay_counter[BRCMF_RSN_REPLAY_LEN];
};
+#define BRCMF_PNO_REPORT_NO_BATCH BIT(2)
+
/**
* struct brcmf_gscan_bucket_config - configuration data for channel bucket.
*
- * @bucket_end_index: !unknown!
- * @bucket_freq_multiple: !unknown!
- * @flag: !unknown!
- * @reserved: !unknown!
- * @repeat: !unknown!
- * @max_freq_multiple: !unknown!
+ * @bucket_end_index: last channel index in @channel_list in
+ *	&struct brcmf_pno_config_le.
+ * @bucket_freq_multiple: scan interval expressed in N * @scan_freq.
+ * @flag: channel bucket report flags.
+ * @reserved: for future use.
+ * @repeat: number of scans at each interval for exponential scan.
+ * @max_freq_multiple: maximum scan interval for exponential scan.
*/
struct brcmf_gscan_bucket_config {
u8 bucket_end_index;
@@ -855,16 +869,19 @@ struct brcmf_gscan_bucket_config {
};
/* version supported which must match firmware */
-#define BRCMF_GSCAN_CFG_VERSION 1
+#define BRCMF_GSCAN_CFG_VERSION 2
/**
* enum brcmf_gscan_cfg_flags - bit values for gscan flags.
*
* @BRCMF_GSCAN_CFG_FLAGS_ALL_RESULTS: send probe responses/beacons to host.
+ * @BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN: all buckets will be included in
+ * first scan cycle.
* @BRCMF_GSCAN_CFG_FLAGS_CHANGE_ONLY: indicated only flags member is changed.
*/
enum brcmf_gscan_cfg_flags {
BRCMF_GSCAN_CFG_FLAGS_ALL_RESULTS = BIT(0),
+ BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN = BIT(3),
BRCMF_GSCAN_CFG_FLAGS_CHANGE_ONLY = BIT(7),
};
@@ -884,12 +901,12 @@ enum brcmf_gscan_cfg_flags {
*/
struct brcmf_gscan_config {
__le16 version;
- u8 flags;
- u8 buffer_threshold;
- u8 swc_nbssid_threshold;
- u8 swc_rssi_window_size;
- u8 count_of_channel_buckets;
- u8 retry_threshold;
+ u8 flags;
+ u8 buffer_threshold;
+ u8 swc_nbssid_threshold;
+ u8 swc_rssi_window_size;
+ u8 count_of_channel_buckets;
+ u8 retry_threshold;
__le16 lost_ap_window;
struct brcmf_gscan_bucket_config bucket[1];
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 72373e59308e..f59642b2c935 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
struct brcmf_fws_mac_descriptor *entry;
- if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE)
+ if (!ifp->ndev || !brcmf_fws_queue_skbs(fws))
return;
entry = &fws->desc.iface[ifp->ifidx];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index aa299c47bfa2..2ce675ab40ef 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2208,6 +2208,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
err = brcmf_net_attach(ifp, true);
if (err) {
brcmf_err("Registering netdevice failed\n");
+ free_netdev(ifp->ndev);
goto fail;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index f36b96dc6acd..f878706613e6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
.write32 = brcmf_pcie_buscore_write32,
};
-static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+static void brcmf_pcie_setup(struct device *dev, int ret,
+ const struct firmware *fw,
void *nvram, u32 nvram_len)
{
- struct brcmf_bus *bus = dev_get_drvdata(dev);
- struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
- struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+ struct brcmf_bus *bus;
+ struct brcmf_pciedev *pcie_bus_dev;
+ struct brcmf_pciedev_info *devinfo;
struct brcmf_commonring **flowrings;
- int ret;
u32 i;
+ /* check firmware loading result */
+ if (ret)
+ goto fail;
+
+ bus = dev_get_drvdata(dev);
+ pcie_bus_dev = bus->bus_priv.pcie;
+ devinfo = pcie_bus_dev->devinfo;
brcmf_pcie_attach(devinfo);
/* Some of the firmwares have the size of the memory of the device
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index a473445ed7c4..ffa243e2e2d0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -14,6 +14,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/netdevice.h>
+#include <linux/gcd.h>
#include <net/cfg80211.h>
#include "core.h"
@@ -35,6 +36,67 @@
#define BRCMF_PNO_HIDDEN_BIT 2
#define BRCMF_PNO_SCHED_SCAN_PERIOD 30
+#define BRCMF_PNO_MAX_BUCKETS 16
+#define GSCAN_BATCH_NO_THR_SET 101
+#define GSCAN_RETRY_THRESHOLD 3
+
+struct brcmf_pno_info {
+ int n_reqs;
+ struct cfg80211_sched_scan_request *reqs[BRCMF_PNO_MAX_BUCKETS];
+ struct mutex req_lock;
+};
+
+#define ifp_to_pno(_ifp) ((_ifp)->drvr->config->pno)
+
+static int brcmf_pno_store_request(struct brcmf_pno_info *pi,
+ struct cfg80211_sched_scan_request *req)
+{
+ if (WARN(pi->n_reqs == BRCMF_PNO_MAX_BUCKETS,
+ "pno request storage full\n"))
+ return -ENOSPC;
+
+ brcmf_dbg(SCAN, "reqid=%llu\n", req->reqid);
+ mutex_lock(&pi->req_lock);
+ pi->reqs[pi->n_reqs++] = req;
+ mutex_unlock(&pi->req_lock);
+ return 0;
+}
+
+static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
+{
+ int i, err = 0;
+
+ mutex_lock(&pi->req_lock);
+
+ /* find request */
+ for (i = 0; i < pi->n_reqs; i++) {
+ if (pi->reqs[i]->reqid == reqid)
+ break;
+ }
+ /* request not found */
+ if (WARN(i == pi->n_reqs, "reqid not found\n")) {
+ err = -ENOENT;
+ goto done;
+ }
+
+ brcmf_dbg(SCAN, "reqid=%llu\n", reqid);
+ pi->n_reqs--;
+
+ /* if last we are done */
+ if (!pi->n_reqs || i == pi->n_reqs)
+ goto done;
+
+ /* fill the gap with remaining requests */
+ while (i <= pi->n_reqs - 1) {
+ pi->reqs[i] = pi->reqs[i + 1];
+ i++;
+ }
+
+done:
+ mutex_unlock(&pi->req_lock);
+ return err;
+}
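
The tail of brcmf_pno_remove_request() closes the gap with an
element-by-element copy. Since the slots are plain pointers, a single
memmove() is the equivalent one-liner (illustration; n_reqs has
already been decremented at this point):

	memmove(&pi->reqs[i], &pi->reqs[i + 1],
		(pi->n_reqs - i) * sizeof(pi->reqs[0]));
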
+
static int brcmf_pno_channel_config(struct brcmf_if *ifp,
struct brcmf_pno_config_le *cfg)
{
@@ -57,16 +119,11 @@ static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
/* set extra pno params */
flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
- BIT(BRCMF_PNO_REPORT_SEPARATELY_BIT) |
BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
pfn_param.repeat = BRCMF_PNO_REPEAT;
pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
/* set up pno scan fr */
- if (scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
- brcmf_dbg(SCAN, "scan period too small, using minimum\n");
- scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
- }
pfn_param.scan_freq = cpu_to_le32(scan_freq);
if (mscan) {
@@ -101,12 +158,24 @@ exit:
return err;
}
-static int brcmf_pno_set_random(struct brcmf_if *ifp, u8 *mac_addr,
- u8 *mac_mask)
+static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
{
struct brcmf_pno_macaddr_le pfn_mac;
+ u8 *mac_addr = NULL;
+ u8 *mac_mask = NULL;
int err, i;
+ for (i = 0; i < pi->n_reqs; i++)
+ if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ mac_addr = pi->reqs[i]->mac_addr;
+ mac_mask = pi->reqs[i]->mac_addr_mask;
+ break;
+ }
+
+ /* no random mac requested */
+ if (!mac_addr)
+ return 0;
+
pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
@@ -120,6 +189,8 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, u8 *mac_addr,
/* Set locally administered */
pfn_mac.mac[0] |= 0x02;
+ brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
+ pi->reqs[i]->reqid, pfn_mac.mac);
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
sizeof(pfn_mac));
if (err)
@@ -132,6 +203,7 @@ static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
bool active)
{
struct brcmf_pno_net_param_le pfn;
+ int err;
pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
@@ -142,7 +214,28 @@ static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);
- return brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
+
+ brcmf_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active);
+ err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
+ if (err < 0)
+ brcmf_err("adding failed: err=%d\n", err);
+ return err;
+}
+
+static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid)
+{
+ struct brcmf_pno_bssid_le bssid_cfg;
+ int err;
+
+ memcpy(bssid_cfg.bssid, bssid, ETH_ALEN);
+ bssid_cfg.flags = 0;
+
+ brcmf_dbg(SCAN, "adding bssid=%pM\n", bssid);
+ err = brcmf_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg,
+ sizeof(bssid_cfg));
+ if (err < 0)
+ brcmf_err("adding failed: err=%d\n", err);
+ return err;
}
static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
@@ -163,7 +256,7 @@ static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
return false;
}
-int brcmf_pno_clean(struct brcmf_if *ifp)
+static int brcmf_pno_clean(struct brcmf_if *ifp)
{
int ret;
@@ -179,73 +272,320 @@ int brcmf_pno_clean(struct brcmf_if *ifp)
return ret;
}
-int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
- struct cfg80211_sched_scan_request *req)
+static int brcmf_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r,
+ struct brcmf_pno_config_le *pno_cfg)
{
- struct brcmf_pno_config_le pno_cfg;
- struct cfg80211_ssid *ssid;
+ u32 n_chan = le32_to_cpu(pno_cfg->channel_num);
u16 chan;
- int i, ret;
+ int i, err = 0;
- /* clean up everything */
- ret = brcmf_pno_clean(ifp);
- if (ret < 0) {
- brcmf_err("failed error=%d\n", ret);
- return ret;
+ for (i = 0; i < r->n_channels; i++) {
+ if (n_chan >= BRCMF_NUMCHANNELS) {
+ err = -ENOSPC;
+ goto done;
+ }
+ chan = r->channels[i]->hw_value;
+ brcmf_dbg(SCAN, "[%d] Chan : %u\n", n_chan, chan);
+ pno_cfg->channel_list[n_chan++] = cpu_to_le16(chan);
}
+ /* return number of channels */
+ err = n_chan;
+done:
+ pno_cfg->channel_num = cpu_to_le32(n_chan);
+ return err;
+}
- /* configure pno */
- ret = brcmf_pno_config(ifp, req->scan_plans[0].interval, 0, 0);
- if (ret < 0)
- return ret;
-
- /* configure random mac */
- if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- ret = brcmf_pno_set_random(ifp, req->mac_addr,
- req->mac_addr_mask);
- if (ret < 0)
- return ret;
+static int brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi,
+ struct brcmf_pno_config_le *pno_cfg,
+ struct brcmf_gscan_bucket_config **buckets,
+ u32 *scan_freq)
+{
+ struct cfg80211_sched_scan_request *sr;
+ struct brcmf_gscan_bucket_config *fw_buckets;
+ int i, err, chidx;
+
+ brcmf_dbg(SCAN, "n_reqs=%d\n", pi->n_reqs);
+ if (WARN_ON(!pi->n_reqs))
+ return -ENODATA;
+
+ /*
+	 * the actual scan period is the gcd() of all the scheduled
+	 * scan plan intervals.
+ */
+ *scan_freq = pi->reqs[0]->scan_plans[0].interval;
+ for (i = 1; i < pi->n_reqs; i++) {
+ sr = pi->reqs[i];
+ *scan_freq = gcd(sr->scan_plans[0].interval, *scan_freq);
+ }
+ if (*scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
+ brcmf_dbg(SCAN, "scan period too small, using minimum\n");
+ *scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
}
- /* configure channels to use */
- for (i = 0; i < req->n_channels; i++) {
- chan = req->channels[i]->hw_value;
- pno_cfg.channel_list[i] = cpu_to_le16(chan);
+ *buckets = NULL;
+ fw_buckets = kcalloc(pi->n_reqs, sizeof(*fw_buckets), GFP_KERNEL);
+ if (!fw_buckets)
+ return -ENOMEM;
+
+ memset(pno_cfg, 0, sizeof(*pno_cfg));
+ for (i = 0; i < pi->n_reqs; i++) {
+ sr = pi->reqs[i];
+ chidx = brcmf_pno_get_bucket_channels(sr, pno_cfg);
+ if (chidx < 0) {
+ err = chidx;
+ goto fail;
+ }
+ fw_buckets[i].bucket_end_index = chidx - 1;
+ fw_buckets[i].bucket_freq_multiple =
+ sr->scan_plans[0].interval / *scan_freq;
+ /* assure period is non-zero */
+ if (!fw_buckets[i].bucket_freq_multiple)
+ fw_buckets[i].bucket_freq_multiple = 1;
+ fw_buckets[i].flag = BRCMF_PNO_REPORT_NO_BATCH;
}
- if (req->n_channels) {
- pno_cfg.channel_num = cpu_to_le32(req->n_channels);
- brcmf_pno_channel_config(ifp, &pno_cfg);
+
+ if (BRCMF_SCAN_ON()) {
+ brcmf_err("base period=%u\n", *scan_freq);
+ for (i = 0; i < pi->n_reqs; i++) {
+ brcmf_err("[%d] period %u max %u repeat %u flag %x idx %u\n",
+ i, fw_buckets[i].bucket_freq_multiple,
+ le16_to_cpu(fw_buckets[i].max_freq_multiple),
+ fw_buckets[i].repeat, fw_buckets[i].flag,
+ fw_buckets[i].bucket_end_index);
+ }
}
+ *buckets = fw_buckets;
+ return pi->n_reqs;
- /* configure each match set */
- for (i = 0; i < req->n_match_sets; i++) {
- ssid = &req->match_sets[i].ssid;
- if (!ssid->ssid_len) {
- brcmf_err("skip broadcast ssid\n");
- continue;
+fail:
+ kfree(fw_buckets);
+ return err;
+}
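
A quick worked example of the base-period computation above: two
requests with scan plan intervals of 30 and 45 seconds give
gcd(30, 45) = 15 as the firmware base period, and bucket multiples of
2 and 3. Standalone C, with a local stand-in for the kernel's gcd()
from linux/gcd.h:

	#include <stdio.h>

	/* Euclid's algorithm, standing in for the kernel helper */
	static unsigned int gcd(unsigned int a, unsigned int b)
	{
		while (b) {
			unsigned int t = a % b;

			a = b;
			b = t;
		}
		return a;
	}

	int main(void)
	{
		unsigned int base = gcd(30, 45);

		/* prints: base=15 mult0=2 mult1=3 */
		printf("base=%u mult0=%u mult1=%u\n",
		       base, 30 / base, 45 / base);
		return 0;
	}
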
+
+static int brcmf_pno_config_networks(struct brcmf_if *ifp,
+ struct brcmf_pno_info *pi)
+{
+ struct cfg80211_sched_scan_request *r;
+ struct cfg80211_match_set *ms;
+ bool active;
+ int i, j, err = 0;
+
+ for (i = 0; i < pi->n_reqs; i++) {
+ r = pi->reqs[i];
+
+ for (j = 0; j < r->n_match_sets; j++) {
+ ms = &r->match_sets[j];
+ if (ms->ssid.ssid_len) {
+ active = brcmf_is_ssid_active(&ms->ssid, r);
+ err = brcmf_pno_add_ssid(ifp, &ms->ssid,
+ active);
+ }
+ if (!err && is_valid_ether_addr(ms->bssid))
+ err = brcmf_pno_add_bssid(ifp, ms->bssid);
+
+ if (err < 0)
+ return err;
}
+ }
+ return 0;
+}
- ret = brcmf_pno_add_ssid(ifp, ssid,
- brcmf_is_ssid_active(ssid, req));
- if (ret < 0)
- brcmf_dbg(SCAN, ">>> PNO filter %s for ssid (%s)\n",
- ret == 0 ? "set" : "failed", ssid->ssid);
+static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
+{
+ struct brcmf_pno_info *pi;
+ struct brcmf_gscan_config *gscan_cfg;
+ struct brcmf_gscan_bucket_config *buckets;
+ struct brcmf_pno_config_le pno_cfg;
+ size_t gsz;
+ u32 scan_freq;
+ int err, n_buckets;
+
+ pi = ifp_to_pno(ifp);
+ n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
+ &scan_freq);
+ if (n_buckets < 0)
+ return n_buckets;
+
+ gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);
+ gscan_cfg = kzalloc(gsz, GFP_KERNEL);
+ if (!gscan_cfg) {
+ err = -ENOMEM;
+ goto free_buckets;
}
+
+ /* clean up everything */
+ err = brcmf_pno_clean(ifp);
+ if (err < 0) {
+ brcmf_err("failed error=%d\n", err);
+ goto free_gscan;
+ }
+
+ /* configure pno */
+ err = brcmf_pno_config(ifp, scan_freq, 0, 0);
+ if (err < 0)
+ goto free_gscan;
+
+ err = brcmf_pno_channel_config(ifp, &pno_cfg);
+ if (err < 0)
+ goto clean;
+
+ gscan_cfg->version = cpu_to_le16(BRCMF_GSCAN_CFG_VERSION);
+ gscan_cfg->retry_threshold = GSCAN_RETRY_THRESHOLD;
+ gscan_cfg->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+ gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;
+
+ gscan_cfg->count_of_channel_buckets = n_buckets;
+ memcpy(&gscan_cfg->bucket[0], buckets,
+ n_buckets * sizeof(*buckets));
+
+ err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);
+
+ if (err < 0)
+ goto clean;
+
+ /* configure random mac */
+ err = brcmf_pno_set_random(ifp, pi);
+ if (err < 0)
+ goto clean;
+
+ err = brcmf_pno_config_networks(ifp, pi);
+ if (err < 0)
+ goto clean;
+
/* Enable the PNO */
- ret = brcmf_fil_iovar_int_set(ifp, "pfn", 1);
+ err = brcmf_fil_iovar_int_set(ifp, "pfn", 1);
+
+clean:
+ if (err < 0)
+ brcmf_pno_clean(ifp);
+free_gscan:
+ kfree(gscan_cfg);
+free_buckets:
+ kfree(buckets);
+ return err;
+}
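
The gsz computation above follows the old one-element-array
convention: struct brcmf_gscan_config ends in bucket[1], so sizeof()
already pays for one bucket and the allocation only adds
n_buckets - 1 more. The same sizing in isolation (demo struct names
invented):

	#include <stdio.h>
	#include <stdlib.h>

	/* struct ending in a one-element array, like brcmf_gscan_config */
	struct demo_cfg {
		unsigned short version;
		struct demo_bucket { unsigned char end_index; } bucket[1];
	};

	int main(void)
	{
		int n_buckets = 4;
		/* sizeof() already includes bucket[0]; add n - 1 more */
		size_t gsz = sizeof(struct demo_cfg) +
			     (n_buckets - 1) * sizeof(struct demo_bucket);
		struct demo_cfg *cfg = calloc(1, gsz);

		printf("%zu bytes for %d buckets\n", gsz, n_buckets);
		free(cfg);
		return 0;
	}
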
+
+int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct brcmf_pno_info *pi;
+ int ret;
+
+ brcmf_dbg(TRACE, "reqid=%llu\n", req->reqid);
+
+ pi = ifp_to_pno(ifp);
+ ret = brcmf_pno_store_request(pi, req);
if (ret < 0)
- brcmf_err("PNO enable failed!! ret=%d\n", ret);
+ return ret;
- return ret;
+ ret = brcmf_pno_config_sched_scans(ifp);
+ if (ret < 0) {
+ brcmf_pno_remove_request(pi, req->reqid);
+ if (pi->n_reqs)
+ (void)brcmf_pno_config_sched_scans(ifp);
+ return ret;
+ }
+ return 0;
+}
+
+int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
+{
+ struct brcmf_pno_info *pi;
+ int err;
+
+ brcmf_dbg(TRACE, "reqid=%llu\n", reqid);
+
+ pi = ifp_to_pno(ifp);
+ err = brcmf_pno_remove_request(pi, reqid);
+ if (err)
+ return err;
+
+ brcmf_pno_clean(ifp);
+
+ if (pi->n_reqs)
+ (void)brcmf_pno_config_sched_scans(ifp);
+
+ return 0;
+}
+
+int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_pno_info *pi;
+
+ brcmf_dbg(TRACE, "enter\n");
+ pi = kzalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi)
+ return -ENOMEM;
+
+ cfg->pno = pi;
+ mutex_init(&pi->req_lock);
+ return 0;
+}
+
+void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_pno_info *pi;
+
+ brcmf_dbg(TRACE, "enter\n");
+ pi = cfg->pno;
+ cfg->pno = NULL;
+
+ WARN_ON(pi->n_reqs);
+ mutex_destroy(&pi->req_lock);
+ kfree(pi);
}
void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan)
{
/* scheduled scan settings */
- wiphy->max_sched_scan_reqs = gscan ? 2 : 1;
+ wiphy->max_sched_scan_reqs = gscan ? BRCMF_PNO_MAX_BUCKETS : 1;
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
}
+u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
+{
+ u64 reqid = 0;
+
+ mutex_lock(&pi->req_lock);
+
+ if (bucket < pi->n_reqs)
+ reqid = pi->reqs[bucket]->reqid;
+
+ mutex_unlock(&pi->req_lock);
+ return reqid;
+}
+
+u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
+ struct brcmf_pno_net_info_le *ni)
+{
+ struct cfg80211_sched_scan_request *req;
+ struct cfg80211_match_set *ms;
+ u32 bucket_map = 0;
+ int i, j;
+
+ mutex_lock(&pi->req_lock);
+ for (i = 0; i < pi->n_reqs; i++) {
+ req = pi->reqs[i];
+
+ if (!req->n_match_sets)
+ continue;
+ for (j = 0; j < req->n_match_sets; j++) {
+ ms = &req->match_sets[j];
+ if (ms->ssid.ssid_len == ni->SSID_len &&
+ !memcmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) {
+ bucket_map |= BIT(i);
+ break;
+ }
+ if (is_valid_ether_addr(ms->bssid) &&
+ !memcmp(ms->bssid, ni->bssid, ETH_ALEN)) {
+ bucket_map |= BIT(i);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&pi->req_lock);
+ return bucket_map;
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
index 07ec51f6bec1..cd9e35ae3b21 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
@@ -21,12 +21,8 @@
#define BRCMF_PNO_SCHED_SCAN_MIN_PERIOD 10
#define BRCMF_PNO_SCHED_SCAN_MAX_PERIOD 508
-/**
- * brcmf_pno_clean - disable and clear pno in firmware.
- *
- * @ifp: interface object used.
- */
-int brcmf_pno_clean(struct brcmf_if *ifp);
+/* forward declaration */
+struct brcmf_pno_info;
/**
* brcmf_pno_start_sched_scan - initiate scheduled scan on device.
@@ -38,6 +34,14 @@ int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
struct cfg80211_sched_scan_request *req);
/**
+ * brcmf_pno_stop_sched_scan - terminate scheduled scan on device.
+ *
+ * @ifp: interface object used.
+ * @reqid: unique identifier of scan to be stopped.
+ */
+int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid);
+
+/**
* brcmf_pno_wiphy_params - fill scheduled scan parameters in wiphy instance.
*
* @wiphy: wiphy instance to be used.
@@ -45,4 +49,35 @@ int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
*/
void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan);
+/**
+ * brcmf_pno_attach - allocate and attach module information.
+ *
+ * @cfg: cfg80211 context used.
+ */
+int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg);
+
+/**
+ * brcmf_pno_detach - detach and free module information.
+ *
+ * @cfg: cfg80211 context used.
+ */
+void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg);
+
+/**
+ * brcmf_pno_find_reqid_by_bucket - find request id for given bucket index.
+ *
+ * @pi: pno instance used.
+ * @bucket: index of firmware bucket.
+ */
+u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket);
+
+/**
+ * brcmf_pno_get_bucket_map - determine bucket map for given netinfo.
+ *
+ * @pi: pno instance used.
+ * @netinfo: netinfo to compare with bucket configuration.
+ */
+u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
+ struct brcmf_pno_net_info_le *netinfo);
+
#endif /* _BRCMF_PNO_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index e03450059b06..fbcbb4325936 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -612,7 +612,9 @@ BRCMF_FW_NVRAM_DEF(43340, "brcmfmac43340-sdio.bin", "brcmfmac43340-sdio.txt");
BRCMF_FW_NVRAM_DEF(4335, "brcmfmac4335-sdio.bin", "brcmfmac4335-sdio.txt");
BRCMF_FW_NVRAM_DEF(43362, "brcmfmac43362-sdio.bin", "brcmfmac43362-sdio.txt");
BRCMF_FW_NVRAM_DEF(4339, "brcmfmac4339-sdio.bin", "brcmfmac4339-sdio.txt");
-BRCMF_FW_NVRAM_DEF(43430, "brcmfmac43430-sdio.bin", "brcmfmac43430-sdio.txt");
+BRCMF_FW_NVRAM_DEF(43430A0, "brcmfmac43430a0-sdio.bin", "brcmfmac43430a0-sdio.txt");
+/* Note the names are not postfixed with a1 for backward compatibility */
+BRCMF_FW_NVRAM_DEF(43430A1, "brcmfmac43430-sdio.bin", "brcmfmac43430-sdio.txt");
BRCMF_FW_NVRAM_DEF(43455, "brcmfmac43455-sdio.bin", "brcmfmac43455-sdio.txt");
BRCMF_FW_NVRAM_DEF(4354, "brcmfmac4354-sdio.bin", "brcmfmac4354-sdio.txt");
BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-sdio.bin", "brcmfmac4356-sdio.txt");
@@ -630,7 +632,8 @@ static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, 4335),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
- BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, 43430),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356)
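
The second argument of BRCMF_FW_NVRAM_ENTRY is a chip-revision bit
mask: bit n set means the entry applies to silicon revision n, so
0x00000001 selects only rev 0 (the A0 image) while 0xFFFFFFFE selects
rev 1 and above. The lookup reduces to a test like the following
(variable names invented for illustration):

	/* does this table entry cover the detected chip revision? */
	bool matches = !!(rev_mask & BIT(chiprev));
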
@@ -2034,6 +2037,7 @@ brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
{
+ struct brcmf_bus_stats *stats;
u16 head_pad;
u8 *dat_buf;
@@ -2043,16 +2047,18 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
head_pad = ((unsigned long)dat_buf % bus->head_align);
if (head_pad) {
if (skb_headroom(pkt) < head_pad) {
- bus->sdiodev->bus_if->tx_realloc++;
- head_pad = 0;
- if (skb_cow(pkt, head_pad))
+ stats = &bus->sdiodev->bus_if->stats;
+ atomic_inc(&stats->pktcowed);
+ if (skb_cow_head(pkt, head_pad)) {
+ atomic_inc(&stats->pktcow_failed);
return -ENOMEM;
+ }
}
skb_push(pkt, head_pad);
dat_buf = (u8 *)(pkt->data);
- memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
}
- return head_pad;
+ memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
+ return 0;
}
/**
@@ -3982,21 +3988,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.get_memdump = brcmf_sdio_bus_get_memdump,
};
-static void brcmf_sdio_firmware_callback(struct device *dev,
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
const struct firmware *code,
void *nvram, u32 nvram_len)
{
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- struct brcmf_sdio *bus = sdiodev->bus;
- int err = 0;
+ struct brcmf_bus *bus_if;
+ struct brcmf_sdio_dev *sdiodev;
+ struct brcmf_sdio *bus;
u8 saveclk;
- brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+ brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+ bus_if = dev_get_drvdata(dev);
+ sdiodev = bus_if->bus_priv.sdio;
+ if (err)
+ goto fail;
if (!bus_if->drvr)
return;
+ bus = sdiodev->bus;
+
/* try to download image and nvram to the dongle */
bus->alp_only = true;
err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4083,6 +4094,7 @@ release:
fail:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
device_release_driver(dev);
+ device_release_driver(&sdiodev->func[2]->dev);
}
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index e4d545f9edee..0eea48e73331 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1159,17 +1159,18 @@ fail:
return ret;
}
-static void brcmf_usb_probe_phase2(struct device *dev,
+static void brcmf_usb_probe_phase2(struct device *dev, int ret,
const struct firmware *fw,
void *nvram, u32 nvlen)
{
struct brcmf_bus *bus = dev_get_drvdata(dev);
- struct brcmf_usbdev_info *devinfo;
- int ret;
+ struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
+
+ if (ret)
+ goto error;
brcmf_dbg(USB, "Start fw downloading\n");
- devinfo = bus->bus_priv.usb->devinfo;
ret = check_file(fw->data);
if (ret < 0) {
brcmf_err("invalid firmware\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index b49848683587..4c8f9f1a5532 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1513,7 +1513,7 @@ out_destroy_workqueue:
out_free_eeprom_blob:
kfree(priv->eeprom_blob);
out_free_eeprom:
- iwl_free_nvm_data(priv->nvm_data);
+ kfree(priv->nvm_data);
out_free_hw:
ieee80211_free_hw(priv->hw);
out:
@@ -1532,7 +1532,7 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
iwl_tt_exit(priv);
kfree(priv->eeprom_blob);
- iwl_free_nvm_data(priv->nvm_data);
+ kfree(priv->nvm_data);
/*netif_stop_queue(dev); */
flush_workqueue(priv->workqueue);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 89137717c1fc..766bb2037b94 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -70,8 +70,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 30
-#define IWL8265_UCODE_API_MAX 30
+#define IWL8000_UCODE_API_MAX 31
+#define IWL8265_UCODE_API_MAX 31
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 22
@@ -98,7 +98,6 @@
IWL8265_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
-#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
@@ -162,10 +161,11 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.dccm2_len = IWL8260_DCCM2_LEN, \
.smem_offset = IWL8260_SMEM_OFFSET, \
.smem_len = IWL8260_SMEM_LEN, \
- .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \
.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
.thermal_params = &iwl8000_tt_params, \
- .apmg_not_supported = true
+ .apmg_not_supported = true, \
+ .ext_nvm = true, \
+ .dbgc_supported = true
#define IWL_DEVICE_8000 \
IWL_DEVICE_8000_COMMON, \
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index 110ceefccc15..42daaddfa740 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 30
+#define IWL9000_UCODE_API_MAX 31
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 30
@@ -73,10 +73,13 @@
#define IWL9000_SMEM_LEN 0x68000
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000RFB_MODULE_FIRMWARE(api) \
+ IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260A_MODULE_FIRMWARE(api) \
IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260B_MODULE_FIRMWARE(api) \
@@ -125,7 +128,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
#define IWL_DEVICE_9000 \
.ucode_api_max = IWL9000_UCODE_API_MAX, \
.ucode_api_min = IWL9000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_8000, \
+ .device_family = IWL_DEVICE_FAMILY_9000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl9000_base_params, \
@@ -144,7 +147,9 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
- .rf_id = true
+ .rf_id = true, \
+ .ext_nvm = true, \
+ .dbgc_supported = true
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
@@ -182,6 +187,7 @@ const struct iwl_cfg iwl9270_2ac_cfg = {
const struct iwl_cfg iwl9460_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9460",
.fw_name_pre = IWL9000_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -193,6 +199,7 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
const struct iwl_cfg iwl9560_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9560",
.fw_name_pre = IWL9000_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -202,5 +209,6 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
};
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
index c648cfb981a3..4634c46d1eb4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 30
+#define IWL_A000_UCODE_API_MAX 31
/* Lowest firmware API version supported */
#define IWL_A000_UCODE_API_MIN 24
@@ -74,7 +74,7 @@
#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
-#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-a0-hrcdb-a0-"
+#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
@@ -103,7 +103,7 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
#define IWL_DEVICE_A000 \
.ucode_api_max = IWL_A000_UCODE_API_MAX, \
.ucode_api_min = IWL_A000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_8000, \
+ .device_family = IWL_DEVICE_FAMILY_A000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl_a000_base_params, \
@@ -123,7 +123,9 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
.mac_addr_from_csr = true, \
.use_tfh = true, \
.rf_id = true, \
- .gen2 = true
+ .gen2 = true, \
+ .ext_nvm = true, \
+ .dbgc_supported = true
const struct iwl_cfg iwla000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC a000",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index a12197e3ce78..127017efdd87 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -88,6 +88,8 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_6150,
IWL_DEVICE_FAMILY_7000,
IWL_DEVICE_FAMILY_8000,
+ IWL_DEVICE_FAMILY_9000,
+ IWL_DEVICE_FAMILY_A000,
};
/*
@@ -129,6 +131,7 @@ enum iwl_led_mode {
/* Antenna presence definitions */
#define ANT_NONE 0x0
+#define ANT_INVALID 0xff
#define ANT_A BIT(0)
#define ANT_B BIT(1)
#define ANT_C BIT(2)
@@ -275,6 +278,8 @@ struct iwl_pwr_tx_backoff {
* filename is constructed as fw_name_pre<api>.ucode.
* @fw_name_pre_next_step: same as @fw_name_pre, only for next step
* (if supported)
+ * @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next
+ * step. Supported only in integrated solutions.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section
@@ -315,6 +320,7 @@ struct iwl_pwr_tx_backoff {
* @integrated: discrete or integrated
* @gen2: a000 and on transport operation
* @cdb: CDB support
+ * @ext_nvm: extended NVM format
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -325,13 +331,13 @@ struct iwl_cfg {
const char *name;
const char *fw_name_pre;
const char *fw_name_pre_next_step;
+ const char *fw_name_pre_rf_next_step;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
const struct iwl_ht_params *ht_params;
const struct iwl_eeprom_params *eeprom_params;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
- const char *default_nvm_file_B_step;
const char *default_nvm_file_C_step;
const struct iwl_tt_params *thermal_params;
enum iwl_device_family device_family;
@@ -362,7 +368,9 @@ struct iwl_cfg {
integrated:1,
use_tfh:1,
gen2:1,
- cdb:1;
+ cdb:1,
+ ext_nvm:1,
+ dbgc_supported:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index fa120fb55373..e239b1d92cf9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -153,6 +153,10 @@
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
+/* host chicken bits */
+#define CSR_HOST_CHICKEN (CSR_BASE + 0x204)
+#define CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME BIT(19)
+
/* Analog phase-lock-loop configuration */
#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
@@ -316,6 +320,11 @@
#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
+/* HW RFID */
+#define CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0)
+#define CSR_HW_RFID_DASH(_val) (((_val) & 0x00000F0) >> 4)
+#define CSR_HW_RFID_STEP(_val) (((_val) & 0x0000F00) >> 8)
+#define CSR_HW_RFID_TYPE(_val) (((_val) & 0x0FFF000) >> 12)
/**
* hw_rev values
@@ -348,7 +357,8 @@ enum {
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
-#define CSR_HW_RF_ID_TYPE_HR (0x00109000)
+#define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
+#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109000)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
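
The CSR_HW_RFID_* accessors added above split the RF ID register into flavor, dash, step and type fields, and the RF_ID type constants (JF/HR/HRCDB) are values of that register. A minimal user-space sketch of the decoding (illustrative, not part of the patch):

#include <stdio.h>

#define CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0)
#define CSR_HW_RFID_DASH(_val)   (((_val) & 0x00000F0) >> 4)
#define CSR_HW_RFID_STEP(_val)   (((_val) & 0x0000F00) >> 8)
#define CSR_HW_RFID_TYPE(_val)   (((_val) & 0x0FFF000) >> 12)

int main(void)
{
        unsigned int hw_rf_id = 0x0010A000;     /* CSR_HW_RF_ID_TYPE_HR */

        /* type = 0x10a; step, dash and flavor are all zero here */
        printf("type=0x%x step=%u dash=%u flavor=%u\n",
               CSR_HW_RFID_TYPE(hw_rf_id), CSR_HW_RFID_STEP(hw_rf_id),
               CSR_HW_RFID_DASH(hw_rf_id), CSR_HW_RFID_FLAVOR(hw_rf_id));
        return 0;
}
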
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index d80312b46f16..a80e4202cd03 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -35,19 +35,20 @@
TRACE_EVENT(iwlwifi_dev_tx_data,
TP_PROTO(const struct device *dev,
- struct sk_buff *skb,
- u8 hdr_len, size_t data_len),
- TP_ARGS(dev, skb, hdr_len, data_len),
+ struct sk_buff *skb, u8 hdr_len),
+ TP_ARGS(dev, skb, hdr_len),
TP_STRUCT__entry(
DEV_ENTRY
- __dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
+ __dynamic_array(u8, data,
+ iwl_trace_data(skb) ? skb->len - hdr_len : 0)
),
TP_fast_assign(
DEV_ASSIGN;
if (iwl_trace_data(skb))
skb_copy_bits(skb, hdr_len,
- __get_dynamic_array(data), data_len);
+ __get_dynamic_array(data),
+ skb->len - hdr_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index f02e2c89abbb..7f16dcce0995 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -2,7 +2,7 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -91,8 +91,8 @@ TRACE_EVENT(iwlwifi_dev_tx,
TP_PROTO(const struct device *dev, struct sk_buff *skb,
void *tfd, size_t tfdlen,
void *buf0, size_t buf0_len,
- void *buf1, size_t buf1_len),
- TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+ int hdr_len),
+ TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, hdr_len),
TP_STRUCT__entry(
DEV_ENTRY
@@ -105,15 +105,20 @@ TRACE_EVENT(iwlwifi_dev_tx,
* for the possible padding).
*/
__dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len)
+ __dynamic_array(u8, buf1, hdr_len > 0 && iwl_trace_data(skb) ?
+ 0 : skb->len - hdr_len)
),
TP_fast_assign(
DEV_ASSIGN;
- __entry->framelen = buf0_len + buf1_len;
+ __entry->framelen = buf0_len;
+ if (hdr_len > 0)
+ __entry->framelen += skb->len - hdr_len;
memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- if (!iwl_trace_data(skb))
- memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+ if (hdr_len > 0 && !iwl_trace_data(skb))
+ skb_copy_bits(skb, hdr_len,
+ __get_dynamic_array(buf1),
+ skb->len - hdr_len);
),
TP_printk("[%s] TX %.2x (%zu bytes)",
__get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 5cfacb0bca84..c8d451474b64 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -215,9 +215,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
char tag[8];
const char *fw_pre_name;
- if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
fw_pre_name = cfg->fw_name_pre_next_step;
+ else if (drv->trans->cfg->integrated &&
+ CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
+ cfg->fw_name_pre_rf_next_step)
+ fw_pre_name = cfg->fw_name_pre_rf_next_step;
else
fw_pre_name = cfg->fw_name_pre;
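
The selected prefix is then combined with the API version tag into the filename passed to request_firmware(); roughly as in the sketch below (the exact buffer handling in iwl_request_firmware() may differ):

#include <stdio.h>

int main(void)
{
        const char *fw_pre_name = "iwlwifi-Qu-a0-hr-a0-"; /* IWL_A000_HR_FW_PRE */
        char tag[8];
        char firmware_name[64];

        snprintf(tag, sizeof(tag), "%d", 31);   /* IWL_A000_UCODE_API_MAX */
        snprintf(firmware_name, sizeof(firmware_name), "%s%s.ucode",
                 fw_pre_name, tag);
        /* requests "iwlwifi-Qu-a0-hr-a0-31.ucode" */
        printf("%s\n", firmware_name);
        return 0;
}
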
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 6c537e04864e..1f8a2eeb7dff 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -79,12 +79,12 @@
#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-#define NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(x) (x & 0xF)
-#define NVM_RF_CFG_DASH_MSK_FAMILY_8000(x) ((x >> 4) & 0xF)
-#define NVM_RF_CFG_STEP_MSK_FAMILY_8000(x) ((x >> 8) & 0xF)
-#define NVM_RF_CFG_TYPE_MSK_FAMILY_8000(x) ((x >> 12) & 0xFFF)
-#define NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(x) ((x >> 24) & 0xF)
-#define NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(x) ((x >> 28) & 0xF)
+#define EXT_NVM_RF_CFG_FLAVOR_MSK(x) ((x) & 0xF)
+#define EXT_NVM_RF_CFG_DASH_MSK(x) (((x) >> 4) & 0xF)
+#define EXT_NVM_RF_CFG_STEP_MSK(x) (((x) >> 8) & 0xF)
+#define EXT_NVM_RF_CFG_TYPE_MSK(x) (((x) >> 12) & 0xFFF)
+#define EXT_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 24) & 0xF)
+#define EXT_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 28) & 0xF)
/**
* DOC: Driver system flows - drv component
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index e04a91d70a15..b33888991b94 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -121,15 +121,6 @@ struct iwl_nvm_data *
iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
const u8 *eeprom, size_t eeprom_size);
-/**
- * iwl_free_nvm_data - free NVM data
- * @data: the data to free
- */
-static inline void iwl_free_nvm_data(struct iwl_nvm_data *data)
-{
- kfree(data);
-}
-
int iwl_nvm_check_version(struct iwl_nvm_data *data,
struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 62f9fe926d78..66e5db41e559 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,6 +66,7 @@
#define __iwl_fh_h__
#include <linux/types.h>
+#include <linux/bitfield.h>
/****************************/
/* Flow Handler Definitions */
@@ -77,6 +78,8 @@
*/
#define FH_MEM_LOWER_BOUND (0x1000)
#define FH_MEM_UPPER_BOUND (0x2000)
+#define FH_MEM_LOWER_BOUND_GEN2 (0xa06000)
+#define FH_MEM_UPPER_BOUND_GEN2 (0xa08000)
/**
* Keep-Warm (KW) buffer base address.
@@ -476,13 +479,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define RFH_GEN_CFG 0xA09800
#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0)
#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1)
-#define RFH_GEN_CFG_RB_CHUNK_SIZE_POS 4
+#define RFH_GEN_CFG_RB_CHUNK_SIZE BIT(4)
#define RFH_GEN_CFG_RB_CHUNK_SIZE_128 1
#define RFH_GEN_CFG_RB_CHUNK_SIZE_64 0
-#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00
-#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS 8
-
-#define DEFAULT_RXQ_NUM 0
+/* the driver assumes everywhere that the default RXQ is 0 */
+#define RFH_GEN_CFG_DEFAULT_RXQ_NUM 0xF00
+#define RFH_GEN_CFG_VAL(_n, _v) FIELD_PREP(RFH_GEN_CFG_ ## _n, _v)
/* end of 9000 rx series registers */
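
The POS/MSK macro pairs are replaced by single mask definitions plus a FIELD_PREP()-based helper from <linux/bitfield.h>; FIELD_PREP() shifts the value to the mask's lowest set bit and masks it. A user-space sketch of the same arithmetic (using the GCC __builtin_ctz() builtin in place of the kernel helper):

#include <stdio.h>

#define RFH_GEN_CFG_DEFAULT_RXQ_NUM 0xF00

/* same arithmetic as FIELD_PREP(mask, val) */
static unsigned int field_prep(unsigned int mask, unsigned int val)
{
        return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
        /* RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 3) == (3 << 8) & 0xF00 == 0x300 */
        printf("0x%x\n", field_prep(RFH_GEN_CFG_DEFAULT_RXQ_NUM, 3));
        return 0;
}
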
@@ -653,6 +655,17 @@ static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
}
+
+/**
+ * enum iwl_tfd_tb_hi_n_len - TB hi_n_len bits
+ * @TB_HI_N_LEN_ADDR_HI_MSK: high 4 bits (to make it 36) of DMA address
+ * @TB_HI_N_LEN_LEN_MSK: length of the TB
+ */
+enum iwl_tfd_tb_hi_n_len {
+ TB_HI_N_LEN_ADDR_HI_MSK = 0xf,
+ TB_HI_N_LEN_LEN_MSK = 0xfff0,
+};
+
/**
* struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
*
@@ -660,8 +673,7 @@ static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
*
* @lo: low [31:0] portion of the dma address of TX buffer
* every even TB is unaligned on a 16-bit boundary
- * @hi_n_len 0-3 [35:32] portion of dma
- * 4-15 length of the tx buffer
+ * @hi_n_len: &enum iwl_tfd_tb_hi_n_len
*/
struct iwl_tfd_tb {
__le32 lo;
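
The new enum documents the hi_n_len packing: the low 4 bits carry bits [35:32] of the DMA address and the upper 12 bits carry the TB length. A sketch of the packing with illustrative values:

#include <stdio.h>
#include <stdint.h>

#define TB_HI_N_LEN_ADDR_HI_MSK 0xf
#define TB_HI_N_LEN_LEN_MSK     0xfff0

int main(void)
{
        uint64_t addr = 0xA12345678ULL; /* illustrative 36-bit DMA address */
        uint16_t len = 0x40;
        uint16_t hi_n_len;

        hi_n_len = ((addr >> 32) & TB_HI_N_LEN_ADDR_HI_MSK) |
                   ((len << 4) & TB_HI_N_LEN_LEN_MSK);
        /* address high nibble 0xa, length 0x40 -> 0x040a */
        printf("hi_n_len=0x%04x\n", hi_n_len);
        return 0;
}
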
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-api.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-api.h
new file mode 100644
index 000000000000..a004409fa984
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-api.h
@@ -0,0 +1,205 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_api_h__
+#define __iwl_fw_api_h__
+
+/**
+ * DOC: Host command section
+ *
+ * A host command is a command issued by the upper layer to the fw. There are
+ * several versions of fw that have several APIs. The transport layer is
+ * completely agnostic to these differences.
+ * The transport does provide helper functionality (i.e. SYNC / ASYNC mode),
+ */
+#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
+#define SEQ_TO_INDEX(s) ((s) & 0xff)
+#define INDEX_TO_SEQ(i) ((i) & 0xff)
+#define SEQ_RX_FRAME cpu_to_le16(0x8000)
+
+/*
+ * those functions retrieve specific information from
+ * the id field in the iwl_host_cmd struct which contains
+ * the command id, the group id and the version of the command
+ * and vice versa
+*/
+static inline u8 iwl_cmd_opcode(u32 cmdid)
+{
+ return cmdid & 0xFF;
+}
+
+static inline u8 iwl_cmd_groupid(u32 cmdid)
+{
+ return ((cmdid & 0xFF00) >> 8);
+}
+
+static inline u8 iwl_cmd_version(u32 cmdid)
+{
+ return ((cmdid & 0xFF0000) >> 16);
+}
+
+static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
+{
+ return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make u16 wide id out of u8 group and opcode */
+#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
+#define DEF_ID(opcode) ((1 << 8) | (opcode))
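
The id field packs opcode, group and version into one u32 (opcode in bits 0-7, group in 8-15, version in 16-23); a round-trip sketch of the helpers above with illustrative values:

#include <assert.h>
#include <stdint.h>

static uint8_t iwl_cmd_opcode(uint32_t cmdid) { return cmdid & 0xFF; }
static uint8_t iwl_cmd_groupid(uint32_t cmdid) { return (cmdid & 0xFF00) >> 8; }
static uint8_t iwl_cmd_version(uint32_t cmdid) { return (cmdid & 0xFF0000) >> 16; }

static uint32_t iwl_cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
{
        return opcode + (groupid << 8) + (version << 16);
}

int main(void)
{
        uint32_t id = iwl_cmd_id(0x1c, 0x2, 1); /* illustrative values */

        assert(iwl_cmd_opcode(id) == 0x1c);
        assert(iwl_cmd_groupid(id) == 0x2);
        assert(iwl_cmd_version(id) == 1);
        return 0;
}
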
+
+/* due to the conversion, this group is special; new groups
+ * should be defined in the appropriate fw-api header files
+ */
+#define IWL_ALWAYS_LONG_GROUP 1
+
+/**
+ * struct iwl_cmd_header
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ */
+struct iwl_cmd_header {
+ u8 cmd; /* Command ID: REPLY_RXON, etc. */
+ u8 group_id;
+ /*
+ * The driver sets up the sequence number to values of its choosing.
+ * uCode does not use this value, but passes it back to the driver
+ * when sending the response to each driver-originated command, so
+ * the driver can match the response to the command. Since the values
+ * don't get used by uCode, the driver may set up an arbitrary format.
+ *
+ * There is one exception: uCode sets bit 15 when it originates
+ * the response/notification, i.e. when the response/notification
+ * is not a direct response to a command sent by the driver. For
+ * example, uCode issues REPLY_RX when it sends a received frame
+ * to the driver; it is not a direct response to any driver command.
+ *
+ * The Linux driver uses the following format:
+ *
+ * 0:7 tfd index - position within TX queue
+ * 8:12 TX queue id
+ * 13:14 reserved
+ * 15 unsolicited RX or uCode-originated notification
+ */
+ __le16 sequence;
+} __packed;
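
The sequence layout described in the comment maps directly onto the SEQ_TO_*/QUEUE_TO_SEQ macros defined earlier in this file; a sketch of building and splitting a sequence value:

#include <assert.h>
#include <stdint.h>

#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
#define SEQ_TO_INDEX(s) ((s) & 0xff)
#define INDEX_TO_SEQ(i) ((i) & 0xff)

int main(void)
{
        /* TFD index 42 on TX queue 9 (illustrative values) */
        uint16_t seq = QUEUE_TO_SEQ(9) | INDEX_TO_SEQ(42);

        assert(SEQ_TO_QUEUE(seq) == 9);
        assert(SEQ_TO_INDEX(seq) == 42);
        return 0;
}
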
+
+/**
+ * struct iwl_cmd_header_wide
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ * this is the wide version that contains more information about the command
+ * like length, version and command type
+ */
+struct iwl_cmd_header_wide {
+ u8 cmd;
+ u8 group_id;
+ __le16 sequence;
+ __le16 length;
+ u8 reserved;
+ u8 version;
+} __packed;
+
+/**
+ * enum iwl_tx_queue_cfg_actions - TXQ config options
+ * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
+ * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
+ */
+enum iwl_tx_queue_cfg_actions {
+ TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
+ TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
+};
+
+/**
+ * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: see &enum iwl_tx_queue_cfg_actions
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwl_tx_queue_cfg_cmd {
+ u8 sta_id;
+ u8 tid;
+ __le16 flags;
+ __le32 cb_size;
+ __le64 byte_cnt_addr;
+ __le64 tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
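
Since @cb_size is "exponent - 3", a ring of n TFDs (a power of two between 8 and 256) encodes as log2(n) - 3; a small sketch of the conversion:

#include <assert.h>

static int tfd_ring_to_cb_size(unsigned int n_tfds)
{
        int exp = 0;

        while ((1u << exp) < n_tfds)
                exp++;
        return exp - 3; /* 8 TFDs -> 0, 256 TFDs -> 5 */
}

int main(void)
{
        assert(tfd_ring_to_cb_size(8) == 0);
        assert(tfd_ring_to_cb_size(64) == 3);
        assert(tfd_ring_to_cb_size(256) == 5);
        return 0;
}
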
+
+/**
+ * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA/TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ */
+struct iwl_tx_queue_cfg_rsp {
+ __le16 queue_number;
+ __le16 flags;
+ __le16 write_pointer;
+ __le16 reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
+
+#endif /* __iwl_fw_api_h__*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
index 420c31dab263..cfebde68a391 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
@@ -116,7 +116,7 @@ enum iwl_fw_error_dump_type {
/**
* struct iwl_fw_error_dump_data - data for one type
- * @type: %enum iwl_fw_error_dump_type
+ * @type: &enum iwl_fw_error_dump_type
* @len: the length starting from %data
* @data: the data itself
*/
@@ -130,7 +130,7 @@ struct iwl_fw_error_dump_data {
* struct iwl_fw_error_dump_file - the layout of the header of the file
* @barker: must be %IWL_FW_ERROR_DUMP_BARKER
* @file_len: the length of all the file starting from %barker
- * @data: array of %struct iwl_fw_error_dump_data
+ * @data: array of &struct iwl_fw_error_dump_data
*/
struct iwl_fw_error_dump_file {
__le32 barker;
@@ -225,7 +225,7 @@ enum iwl_fw_error_dump_mem_type {
/**
* struct iwl_fw_error_dump_mem - chunk of memory
- * @type: %enum iwl_fw_error_dump_mem_type
+ * @type: &enum iwl_fw_error_dump_mem_type
* @offset: the offset from which the memory was read
* @data: the content of the memory
*/
@@ -324,7 +324,7 @@ enum iwl_fw_dbg_trigger {
/**
* struct iwl_fw_error_dump_trigger_desc - describes the trigger condition
- * @type: %enum iwl_fw_dbg_trigger
+ * @type: &enum iwl_fw_dbg_trigger
* @data: raw data about what happened
*/
struct iwl_fw_error_dump_trigger_desc {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 44419e82da1b..a216657b3c60 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -244,6 +244,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
* ADD_MODIFY_STA_KEY_API_S_VER_2.
* @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
+ * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -255,6 +256,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29,
IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
+ IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -351,6 +353,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
+ IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
@@ -395,8 +398,8 @@ enum iwl_ucode_tlv_capa {
#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
-/*
- * Calibration control struct.
+/**
+ * struct iwl_tlv_calib_ctrl - Calibration control struct.
* Sent as part of the phy configuration command.
* @flow_trigger: bitmap for which calibrations to perform according to
* flow triggers.
@@ -468,7 +471,7 @@ enum iwl_fw_dbg_reg_operator {
/**
* struct iwl_fw_dbg_reg_op - an operation on a register
*
- * @op: %enum iwl_fw_dbg_reg_operator
+ * @op: &enum iwl_fw_dbg_reg_operator
* @addr: offset of the register
* @val: value
*/
@@ -526,7 +529,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
* struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
*
* @version: version of the TLV - currently 0
- * @monitor_mode: %enum iwl_fw_dbg_monitor_mode
+ * @monitor_mode: &enum iwl_fw_dbg_monitor_mode
* @size_power: buffer size will be 2^(size_power + 11)
* @base_reg: addr of the base addr register (PRPH)
* @end_reg: addr of the end addr register (PRPH)
@@ -595,15 +598,15 @@ enum iwl_fw_dbg_trigger_vif_type {
/**
* struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger
- * @id: %enum iwl_fw_dbg_trigger
- * @vif_type: %enum iwl_fw_dbg_trigger_vif_type
+ * @id: &enum iwl_fw_dbg_trigger
+ * @vif_type: &enum iwl_fw_dbg_trigger_vif_type
* @stop_conf_ids: bitmap of configurations this trigger relates to.
* if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding
* to the currently running configuration is set, the data should be
* collected.
* @stop_delay: how many milliseconds to wait before collecting the data
* after the STOP trigger fires.
- * @mode: %enum iwl_fw_dbg_trigger_mode - can be stop / start of both
+ * @mode: &enum iwl_fw_dbg_trigger_mode - can be stop / start of both
* @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what
* configuration should be applied when the triggers kicks in.
* @occurrences: number of occurrences. 0 means the trigger will never fire.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 9c8b09cf1f7b..c527b8c10370 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -241,12 +241,12 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
void iwl_force_nmi(struct iwl_trans *trans)
{
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_DRV);
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_HW);
- } else if (trans->cfg->gen2) {
+ } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) {
iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER,
DEVICE_SET_NMI_8000_VAL);
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 4d32b10fe50c..0bd85e58cc2c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -114,7 +114,7 @@ enum iwl_uapsd_disable {
* @debug_level: levels are IWL_DL_*
* @ant_coupling: antenna coupling in dB, default = 0
* @nvm_file: specifies an external NVM file
- * @uapsd_disable: disable U-APSD, see %enum iwl_uapsd_disable, default =
+ * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
* IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
* @d0i3_disable: disable d0i3, default = 1,
* @d0i3_entry_delay: time to wait after no refs are taken before
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 721ae6bef5da..5c08f4d40f6a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -94,30 +94,21 @@ enum wkp_nvm_offsets {
XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
};
-enum family_8000_nvm_offsets {
+enum ext_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
- HW_ADDR0_WFPM_FAMILY_8000 = 0x12,
- HW_ADDR1_WFPM_FAMILY_8000 = 0x16,
- HW_ADDR0_PCIE_FAMILY_8000 = 0x8A,
- HW_ADDR1_PCIE_FAMILY_8000 = 0x8E,
- MAC_ADDRESS_OVERRIDE_FAMILY_8000 = 1,
+ MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
/* NVM SW-Section offset (in words) definitions */
- NVM_SW_SECTION_FAMILY_8000 = 0x1C0,
- NVM_VERSION_FAMILY_8000 = 0,
- RADIO_CFG_FAMILY_8000 = 0,
+ NVM_VERSION_EXT_NVM = 0,
+ RADIO_CFG_FAMILY_EXT_NVM = 0,
SKU_FAMILY_8000 = 2,
N_HW_ADDRS_FAMILY_8000 = 3,
/* NVM REGULATORY -Section offset (in words) definitions */
- NVM_CHANNELS_FAMILY_8000 = 0,
- NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7,
- NVM_LAR_OFFSET_FAMILY_8000 = 0x507,
- NVM_LAR_ENABLED_FAMILY_8000 = 0x7,
-
- /* NVM calibration section offset (in words) definitions */
- NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
- XTAL_CALIB_FAMILY_8000 = 0x316 - NVM_CALIB_SECTION_FAMILY_8000
+ NVM_CHANNELS_EXTENDED = 0,
+ NVM_LAR_OFFSET_OLD = 0x4C7,
+ NVM_LAR_OFFSET = 0x507,
+ NVM_LAR_ENABLED = 0x7,
};
/* SKU Capabilities (actual values from NVM definition) */
@@ -141,7 +132,7 @@ static const u8 iwl_nvm_channels[] = {
149, 153, 157, 161, 165
};
-static const u8 iwl_nvm_channels_family_8000[] = {
+static const u8 iwl_ext_nvm_channels[] = {
/* 2.4 GHz */
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
/* 5 GHz */
@@ -151,9 +142,9 @@ static const u8 iwl_nvm_channels_family_8000[] = {
};
#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
-#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
+#define IWL_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
#define NUM_2GHZ_CHANNELS 14
-#define NUM_2GHZ_CHANNELS_FAMILY_8000 14
+#define NUM_2GHZ_CHANNELS_EXT 14
#define FIRST_2GHZ_HT_MINUS 5
#define LAST_2GHZ_HT_PLUS 9
#define LAST_5GHZ_HT 165
@@ -219,7 +210,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
u32 flags = IEEE80211_CHAN_NO_HT40;
u32 last_5ghz_ht = LAST_5GHZ_HT;
- if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ if (cfg->ext_nvm)
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
@@ -273,14 +264,14 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!cfg->ext_nvm) {
num_of_ch = IWL_NUM_CHANNELS;
nvm_chan = &iwl_nvm_channels[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS;
} else {
- num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
- nvm_chan = &iwl_nvm_channels_family_8000[0];
- num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
+ num_of_ch = IWL_NUM_CHANNELS_EXT;
+ nvm_chan = &iwl_ext_nvm_channels[0];
+ num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
@@ -479,7 +470,7 @@ IWL_EXPORT_SYMBOL(iwl_init_sbands);
static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
{
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ if (!cfg->ext_nvm)
return le16_to_cpup(nvm_sw + SKU);
return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
@@ -487,20 +478,20 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ if (!cfg->ext_nvm)
return le16_to_cpup(nvm_sw + NVM_VERSION);
else
return le32_to_cpup((__le32 *)(nvm_sw +
- NVM_VERSION_FAMILY_8000));
+ NVM_VERSION_EXT_NVM));
}
static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
{
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ if (!cfg->ext_nvm)
return le16_to_cpup(nvm_sw + RADIO_CFG);
- return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000));
+ return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
}
@@ -508,7 +499,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
int n_hw_addr;
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ if (!cfg->ext_nvm)
return le16_to_cpup(nvm_sw + N_HW_ADDRS);
n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
@@ -520,7 +511,7 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
u32 radio_cfg)
{
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!cfg->ext_nvm) {
data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
@@ -529,12 +520,12 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
}
/* set the radio configuration for family 8000 */
- data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK_FAMILY_8000(radio_cfg);
- data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg);
- data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg);
- data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg);
- data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(radio_cfg);
- data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg);
+ data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg);
+ data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg);
+ data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg);
+ data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg);
+ data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
}
static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
@@ -587,7 +578,7 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
};
hw_addr = (const u8 *)(mac_override +
- MAC_ADDRESS_OVERRIDE_FAMILY_8000);
+ MAC_ADDRESS_OVERRIDE_EXT_NVM);
/*
* Store the MAC address from MAO section.
@@ -629,7 +620,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
{
if (cfg->mac_addr_from_csr) {
iwl_set_hw_address_from_csr(trans, data);
- } else if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ } else if (!cfg->ext_nvm) {
const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
/* The byte order is little endian 16 bit, meaning 214365 */
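
The "214365" note means the address is stored as little-endian 16-bit words, so consecutive byte pairs come out swapped; a sketch of the reordering (the in-tree helper operates on __le32 halves, which is not shown in this hunk):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* stored as 214365: pairs swapped relative to the real address */
        const uint8_t stored[6] = { 0x34, 0x12, 0x78, 0x56, 0xbc, 0x9a };
        uint8_t mac[6];
        int i;

        for (i = 0; i < 6; i += 2) {
                mac[i] = stored[i + 1]; /* swap each 16-bit pair */
                mac[i + 1] = stored[i];
        }
        /* prints 12:34:56:78:9a:bc */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}
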
@@ -649,6 +640,8 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
return -EINVAL;
}
+ IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
+
return 0;
}
@@ -666,7 +659,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
u16 lar_config;
const __le16 *ch_section;
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ if (!cfg->ext_nvm)
data = kzalloc(sizeof(*data) +
sizeof(struct ieee80211_channel) *
IWL_NUM_CHANNELS,
@@ -674,7 +667,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
else
data = kzalloc(sizeof(*data) +
sizeof(struct ieee80211_channel) *
- IWL_NUM_CHANNELS_FAMILY_8000,
+ IWL_NUM_CHANNELS_EXT,
GFP_KERNEL);
if (!data)
return NULL;
@@ -700,7 +693,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!cfg->ext_nvm) {
/* Checking for required sections */
if (!nvm_calib) {
IWL_ERR(trans,
@@ -715,14 +708,14 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
ch_section = &nvm_sw[NVM_CHANNELS];
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
- NVM_LAR_OFFSET_FAMILY_8000_OLD :
- NVM_LAR_OFFSET_FAMILY_8000;
+ NVM_LAR_OFFSET_OLD :
+ NVM_LAR_OFFSET;
lar_config = le16_to_cpup(regulatory + lar_offset);
data->lar_enabled = !!(lar_config &
- NVM_LAR_ENABLED_FAMILY_8000);
+ NVM_LAR_ENABLED);
lar_enabled = data->lar_enabled;
- ch_section = &regulatory[NVM_CHANNELS_FAMILY_8000];
+ ch_section = &regulatory[NVM_CHANNELS_EXTENDED];
}
/* If no valid mac address was found - bail out */
@@ -746,7 +739,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
u32 flags = NL80211_RRF_NO_HT40;
u32 last_5ghz_ht = LAST_5GHZ_HT;
- if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ if (cfg->ext_nvm)
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
if (ch_idx < NUM_2GHZ_CHANNELS &&
@@ -793,8 +786,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
{
int ch_idx;
u16 ch_flags, prev_ch_flags = 0;
- const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
- iwl_nvm_channels_family_8000 : iwl_nvm_channels;
+ const u8 *nvm_chan = cfg->ext_nvm ?
+ iwl_ext_nvm_channels : iwl_nvm_channels;
struct ieee80211_regdomain *regd;
int size_of_regd;
struct ieee80211_reg_rule *rule;
@@ -802,8 +795,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int center_freq, prev_center_freq = 0;
int valid_rules = 0;
bool new_rule;
- int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
- IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS;
+ int max_num_ch = cfg->ext_nvm ?
+ IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 77efbb78e867..6772c59b7764 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -66,6 +66,7 @@
#ifndef __iwl_prph_h__
#define __iwl_prph_h__
+#include <linux/bitfield.h>
/*
* Registers in this file are internal, not PCI bus memory mapped.
@@ -247,14 +248,14 @@
#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
#define SCD_QUEUE_STTS_REG_MSK (0x017F0000)
-#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
-#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
-#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
-#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
-#define SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
-#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
-#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
-#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+#define SCD_QUEUE_CTX_REG1_CREDIT (0x00FFFF00)
+#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT (0xFF000000)
+#define SCD_QUEUE_CTX_REG1_VAL(_n, _v) FIELD_PREP(SCD_QUEUE_CTX_REG1_ ## _n, _v)
+
+#define SCD_QUEUE_CTX_REG2_WIN_SIZE (0x0000007F)
+#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT (0x007F0000)
+#define SCD_QUEUE_CTX_REG2_VAL(_n, _v) FIELD_PREP(SCD_QUEUE_CTX_REG2_ ## _n, _v)
+
#define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0)
#define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 0bde26bab15d..784bdd0ed233 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -102,6 +102,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
if (!trans->dev_cmd_pool)
return NULL;
+ WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
+
return trans;
}
@@ -115,7 +117,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
int ret;
if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans->status)))
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
return -ERFKILL;
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
@@ -141,6 +143,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (!(cmd->flags & CMD_ASYNC))
lock_map_release(&trans->sync_cmd_lockdep_map);
+ if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt))
+ return -EIO;
+
return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 0ebfdbb22992..57db6250a329 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -76,6 +76,7 @@
#include "iwl-config.h"
#include "iwl-fw.h"
#include "iwl-op-mode.h"
+#include "iwl-fw-api.h"
/**
* DOC: Transport layer - what is it ?
@@ -111,104 +112,6 @@
* 6) Eventually, the free function will be called.
*/
-/**
- * DOC: Host command section
- *
- * A host command is a command issued by the upper layer to the fw. There are
- * several versions of fw that have several APIs. The transport layer is
- * completely agnostic to these differences.
- * The transport does provide helper functionality (i.e. SYNC / ASYNC mode),
- */
-#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
-#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
-#define SEQ_TO_INDEX(s) ((s) & 0xff)
-#define INDEX_TO_SEQ(i) ((i) & 0xff)
-#define SEQ_RX_FRAME cpu_to_le16(0x8000)
-
-/*
- * those functions retrieve specific information from
- * the id field in the iwl_host_cmd struct which contains
- * the command id, the group id and the version of the command
- * and vice versa
-*/
-static inline u8 iwl_cmd_opcode(u32 cmdid)
-{
- return cmdid & 0xFF;
-}
-
-static inline u8 iwl_cmd_groupid(u32 cmdid)
-{
- return ((cmdid & 0xFF00) >> 8);
-}
-
-static inline u8 iwl_cmd_version(u32 cmdid)
-{
- return ((cmdid & 0xFF0000) >> 16);
-}
-
-static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
-{
- return opcode + (groupid << 8) + (version << 16);
-}
-
-/* make u16 wide id out of u8 group and opcode */
-#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
-#define DEF_ID(opcode) ((1 << 8) | (opcode))
-
-/* due to the conversion, this group is special; new groups
- * should be defined in the appropriate fw-api header files
- */
-#define IWL_ALWAYS_LONG_GROUP 1
-
-/**
- * struct iwl_cmd_header
- *
- * This header format appears in the beginning of each command sent from the
- * driver, and each response/notification received from uCode.
- */
-struct iwl_cmd_header {
- u8 cmd; /* Command ID: REPLY_RXON, etc. */
- u8 group_id;
- /*
- * The driver sets up the sequence number to values of its choosing.
- * uCode does not use this value, but passes it back to the driver
- * when sending the response to each driver-originated command, so
- * the driver can match the response to the command. Since the values
- * don't get used by uCode, the driver may set up an arbitrary format.
- *
- * There is one exception: uCode sets bit 15 when it originates
- * the response/notification, i.e. when the response/notification
- * is not a direct response to a command sent by the driver. For
- * example, uCode issues REPLY_RX when it sends a received frame
- * to the driver; it is not a direct response to any driver command.
- *
- * The Linux driver uses the following format:
- *
- * 0:7 tfd index - position within TX queue
- * 8:12 TX queue id
- * 13:14 reserved
- * 15 unsolicited RX or uCode-originated notification
- */
- __le16 sequence;
-} __packed;
-
-/**
- * struct iwl_cmd_header_wide
- *
- * This header format appears in the beginning of each command sent from the
- * driver, and each response/notification received from uCode.
- * this is the wide version that contains more information about the command
- * like length, version and command type
- */
-struct iwl_cmd_header_wide {
- u8 cmd;
- u8 group_id;
- __le16 sequence;
- __le16 length;
- u8 reserved;
- u8 version;
-} __packed;
-
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000
#define FH_RSCSR_FRAME_ALIGN 0x40
@@ -308,7 +211,7 @@ struct iwl_device_cmd {
#define IWL_MAX_CMD_TBS_PER_TFD 2
/**
- * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
+ * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
* @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
* ring. The transport layer doesn't map the command's buffer to DMA, but
@@ -419,7 +322,8 @@ enum iwl_d3_status {
* @STATUS_DEVICE_ENABLED: APM is enabled
* @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
* @STATUS_INT_ENABLED: interrupts are enabled
- * @STATUS_RFKILL: the HW RFkill switch is in KILL position
+ * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
+ * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
* @STATUS_FW_ERROR: the fw is in error state
* @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
* are sent
@@ -431,7 +335,8 @@ enum iwl_trans_status {
STATUS_DEVICE_ENABLED,
STATUS_TPOWER_PMI,
STATUS_INT_ENABLED,
- STATUS_RFKILL,
+ STATUS_RFKILL_HW,
+ STATUS_RFKILL_OPMODE,
STATUS_FW_ERROR,
STATUS_TRANS_GOING_IDLE,
STATUS_TRANS_IDLE,
@@ -533,44 +438,6 @@ struct iwl_trans_txq_scd_cfg {
int frame_limit;
};
-/* Available options for &struct iwl_tx_queue_cfg_cmd */
-enum iwl_tx_queue_cfg_actions {
- TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
- TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
-};
-
-/**
- * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
- * @sta_id: station id
- * @tid: tid of the queue
- * @flags: Bit 0 - on enable, off - disable, Bit 1 - short TFD format
- * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
- * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
- * @byte_cnt_addr: address of byte count table
- * @tfdq_addr: address of TFD circular buffer
- */
-struct iwl_tx_queue_cfg_cmd {
- u8 sta_id;
- u8 tid;
- __le16 flags;
- __le32 cb_size;
- __le64 byte_cnt_addr;
- __le64 tfdq_addr;
-} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
-
-/**
- * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
- * @queue_number: queue number assigned to this RA -TID
- * @flags: set on failure
- * @write_pointer: initial value for write pointer
- */
-struct iwl_tx_queue_cfg_rsp {
- __le16 queue_number;
- __le16 flags;
- __le16 write_pointer;
- __le16 reserved;
-} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
-
/**
* struct iwl_trans_ops - transport specific operations
*
@@ -615,11 +482,14 @@ struct iwl_tx_queue_cfg_rsp {
* iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
* this one. The op_mode must not configure the HCMD queue. The scheduler
* configuration may be %NULL, in which case the hardware will not be
- * configured. May sleep.
+ * configured. If true is returned, the operation mode needs to increment
+ * the sequence number of the packets routed to this queue because of a
+ * hardware scheduler bug. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @txq_set_shared_mode: change Tx queue shared/unshared marking
- * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
+ * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
+ * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
* @freeze_txq_timer: prevents the timer of the queue from firing until the
* queue is set to awake. Must be atomic.
* @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
@@ -676,7 +546,7 @@ struct iwl_trans_ops {
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
- void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
+ bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int queue_wdg_timeout);
void (*txq_disable)(struct iwl_trans *trans, int queue,
@@ -692,6 +562,7 @@ struct iwl_trans_ops {
bool shared);
int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
+ int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
@@ -1041,13 +912,7 @@ iwl_trans_dump_data(struct iwl_trans *trans,
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
- struct iwl_device_cmd *dev_cmd_ptr =
- kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
-
- if (unlikely(dev_cmd_ptr == NULL))
- return NULL;
-
- return dev_cmd_ptr;
+ return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -1089,7 +954,7 @@ static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
trans->ops->txq_disable(trans, queue, configure_scd);
}
-static inline void
+static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int queue_wdg_timeout)
@@ -1098,10 +963,11 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
+ return false;
}
- trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
+ return trans->ops->txq_enable(trans, queue, ssn,
+ cfg, queue_wdg_timeout);
}
static inline void
@@ -1198,6 +1064,9 @@ static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
u32 txqs)
{
+ if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
+ return -ENOTSUPP;
+
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
@@ -1206,6 +1075,19 @@ static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
return trans->ops->wait_tx_queues_empty(trans, txqs);
}
+static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
+{
+ if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
+ return -ENOTSUPP;
+
+ if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ return -EIO;
+ }
+
+ return trans->ops->wait_txq_empty(trans, queue);
+}
+
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trans->ops->write8(trans, ofs, val);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 49b4418e6c35..fe7f1e424f55 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -914,7 +914,8 @@ void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 ant_isolation = le32_to_cpup((void *)pkt->data);
+ struct iwl_mvm_antenna_coupling_notif *notif = (void *)pkt->data;
+ u32 ant_isolation = le32_to_cpu(notif->isolation);
struct iwl_bt_coex_corun_lut_update_cmd cmd = {};
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 4eeb6b78d952..6fda8627b726 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -110,6 +110,7 @@
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0
#define IWL_MVM_HW_CSUM_DISABLE 0
+#define IWL_MVM_PARSE_NVM 0
#define IWL_MVM_COLLECT_FW_ERR_DUMP 1
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 7a56a0ac151c..ca2d11f4984e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1795,12 +1795,6 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ERR_PTR(ret);
}
- /* RF-kill already asserted again... */
- if (!cmd.resp_pkt) {
- fw_status = ERR_PTR(-ERFKILL);
- goto out_free_resp;
- }
-
status_size = sizeof(*fw_status);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
@@ -1925,12 +1919,6 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
return ret;
}
- /* RF-kill already asserted again... */
- if (!cmd.resp_pkt) {
- ret = -ERFKILL;
- goto out_free_resp;
- }
-
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < sizeof(*query)) {
IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
@@ -2087,9 +2075,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
bool keep = false;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
-
- u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
- CMD_WAKE_UP_TRANS;
+ bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
mutex_lock(&mvm->mutex);
@@ -2110,6 +2097,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
+ if (d0i3_first) {
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ if (ret < 0) {
+ IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
+ ret);
+ goto err;
+ }
+ }
+
/*
* Query the current location and source from the D3 firmware so we
* can play it back when we re-initialize the D0 firmware
@@ -2155,9 +2151,14 @@ out_iterate:
iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
out:
+ /* no need to reset the device in unified images, if successful */
if (unified_image && !ret) {
- ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
- if (!ret) /* D3 ended successfully - no need to reset device */
+ /* nothing else to do if we already sent D0I3_END_CMD */
+ if (d0i3_first)
+ return 0;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ if (!ret)
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 5d475b4850ae..a7ac281e5cde 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1304,11 +1304,11 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
char buf[30] = {};
int len;
- len = snprintf(buf, sizeof(buf) - 1,
- "traffic=%d\ndbgfs=%d\nvcmd=%d\n",
- mvmvif->low_latency_traffic,
- mvmvif->low_latency_dbgfs,
- mvmvif->low_latency_vcmd);
+ len = scnprintf(buf, sizeof(buf) - 1,
+ "traffic=%d\ndbgfs=%d\nvcmd=%d\n",
+ mvmvif->low_latency_traffic,
+ mvmvif->low_latency_dbgfs,
+ mvmvif->low_latency_vcmd);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1385,10 +1385,12 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
char buf[8];
+ int len;
- snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo);
+ len = scnprintf(buf, sizeof(buf), "0x%04x\n",
+ mvmvif->mvm->dbgfs_rx_phyinfo);
- return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static void iwl_dbgfs_quota_check(void *data, u8 *mac,
@@ -1439,7 +1441,7 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
char buf[10];
int len;
- len = snprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
+ len = scnprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 402846650cbe..c2a1aeef74ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -82,7 +83,8 @@ static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
char buf[16];
int pos, budget;
- if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->cur_ucode != IWL_UCODE_REGULAR)
return -EIO;
mutex_lock(&mvm->mutex);
@@ -102,7 +104,8 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf,
{
int ret;
- if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->cur_ucode != IWL_UCODE_REGULAR)
return -EIO;
mutex_lock(&mvm->mutex);
@@ -116,18 +119,30 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
int ret;
- u32 scd_q_msk;
+ u32 flush_arg;
- if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->cur_ucode != IWL_UCODE_REGULAR)
return -EIO;
- if (sscanf(buf, "%x", &scd_q_msk) != 1)
+ if (kstrtou32(buf, 0, &flush_arg))
return -EINVAL;
- IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "FLUSHING all tids queues on sta_id = %d\n",
+ flush_arg);
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ? : count;
+ mutex_unlock(&mvm->mutex);
+ return ret;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n",
+ flush_arg);
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
+ ret = iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ? : count;
mutex_unlock(&mvm->mutex);
return ret;
@@ -139,7 +154,8 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
struct iwl_mvm_sta *mvmsta;
int sta_id, drain, ret;
- if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->cur_ucode != IWL_UCODE_REGULAR)
return -EIO;
if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
@@ -172,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t ret;
u8 *ptr;
- if (!mvm->ucode_loaded)
+ if (!iwl_mvm_firmware_running(mvm))
return -EINVAL;
/* default is to dump the entire data segment */
@@ -205,7 +221,7 @@ static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
u32 offset, len;
u32 img_offset, img_len;
- if (!mvm->ucode_loaded)
+ if (!iwl_mvm_firmware_running(mvm))
return -EINVAL;
img = &mvm->fw->img[mvm->cur_ucode];
@@ -258,7 +274,7 @@ static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm,
{
int temperature;
- if (!mvm->ucode_loaded && !mvm->temperature_test)
+ if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test)
return -EIO;
if (kstrtoint(buf, 10, &temperature))
@@ -305,7 +321,7 @@ static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
int pos, ret;
s32 temp;
- if (!mvm->ucode_loaded)
+ if (!iwl_mvm_firmware_running(mvm))
return -EIO;
mutex_lock(&mvm->mutex);
@@ -371,7 +387,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
{
int ret, val;
- if (!mvm->ucode_loaded)
+ if (!iwl_mvm_firmware_running(mvm))
return -EIO;
if (!strncmp("disable_power_off_d0=", buf, 21)) {
@@ -583,7 +599,11 @@ iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
mvm->bt_force_ant_mode = bt_force_ant_mode;
IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
modes_str[mvm->bt_force_ant_mode]);
- ret = iwl_send_bt_init_conf(mvm);
+
+ if (iwl_mvm_firmware_running(mvm))
+ ret = iwl_send_bt_init_conf(mvm);
+ else
+ ret = 0;
out:
mutex_unlock(&mvm->mutex);
@@ -800,6 +820,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
{
int __maybe_unused ret;
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
mutex_lock(&mvm->mutex);
/* allow one more restart that we're provoking here */
@@ -817,7 +840,12 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
+ int ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
if (ret)
return ret;
@@ -857,6 +885,9 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
{
u8 scan_rx_ant;
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
return -EINVAL;
if (scan_rx_ant > ANT_ABC)
@@ -911,7 +942,11 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+ if (iwl_mvm_firmware_running(mvm))
+ ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0,
+ sizeof(cmd), &cmd);
+ else
+ ret = 0;
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -931,6 +966,9 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
int bin_len = count / 2;
int ret = -EINVAL;
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
/* supporting only 9000 descriptor */
if (!mvm->trans->cfg->mq_rx_supported)
return -ENOTSUPP;
@@ -1004,11 +1042,14 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
struct iwl_continuous_record_cmd cont_rec = {};
int ret, rec_mode;
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
if (!dest)
return -EOPNOTSUPP;
if (dest->monitor_mode != SMEM_MODE ||
- trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
return -EOPNOTSUPP;
ret = kstrtoint(buf, 0, &rec_mode);
@@ -1034,6 +1075,9 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
unsigned int conf_id;
int ret;
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
ret = kstrtouint(buf, 0, &conf_id);
if (ret)
return ret;
@@ -1052,8 +1096,12 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
{
- int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
+ int ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
if (ret)
return ret;
if (count == 0)
@@ -1184,7 +1232,8 @@ static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
&filter, sizeof(filter));
/* send updated bcast filtering configuration */
- if (mvm->dbgfs_bcast_filtering.override &&
+ if (iwl_mvm_firmware_running(mvm) &&
+ mvm->dbgfs_bcast_filtering.override &&
iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
sizeof(cmd), &cmd);
@@ -1256,7 +1305,8 @@ static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
&mac, sizeof(mac));
/* send updated bcast filtering configuration */
- if (mvm->dbgfs_bcast_filtering.override &&
+ if (iwl_mvm_firmware_running(mvm) &&
+ mvm->dbgfs_bcast_filtering.override &&
iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
sizeof(cmd), &cmd);
@@ -1473,6 +1523,9 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
{
int ret;
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
mutex_unlock(&mvm->mutex);
@@ -1534,6 +1587,9 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
size_t delta;
ssize_t ret, len;
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
DEBUG_GROUP, 0);
cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
@@ -1586,6 +1642,9 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
u32 op, len;
ssize_t ret;
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
DEBUG_GROUP, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h
index 204c1b13988b..c432fdb98630 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h
@@ -99,8 +99,8 @@ enum iwl_bt_coex_enabled_modules {
/**
* struct iwl_bt_coex_cmd - bt coex configuration command
- * @mode: enum %iwl_bt_coex_mode
- * @enabled_modules: enum %iwl_bt_coex_enabled_modules
+ * @mode: &enum iwl_bt_coex_mode
+ * @enabled_modules: &enum iwl_bt_coex_enabled_modules
*
* The structure is used for the BT_COEX command.
*/
@@ -234,9 +234,9 @@ enum iwl_bt_ci_compliance {
* @mbox_msg: message from BT to WiFi
* @msg_idx: the index of the message
* @bt_ci_compliance: enum %iwl_bt_ci_compliance
- * @primary_ch_lut: LUT used for primary channel enum %iwl_bt_coex_lut_type
- * @secondary_ch_lut: LUT used for secondary channel enume %iwl_bt_coex_lut_type
- * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
+ * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type
+ * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type
+ * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
* @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY
*/
struct iwl_bt_coex_profile_notif {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
index 5f22cc7ac26a..edde49202786 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -254,6 +255,17 @@ enum iwl_wowlan_flags {
ENABLE_STORE_BEACON = BIT(4),
};
+/**
+ * struct iwl_wowlan_config_cmd - WoWLAN configuration
+ * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
+ * @non_qos_seq: non-QoS sequence counter to use next
+ * @qos_seq: QoS sequence counters to use next
+ * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down
+ * @is_11n_connection: indicates HT connection
+ * @offloading_tid: TID reserved for firmware use
+ * @flags: extra flags, see &enum iwl_wowlan_flags
+ * @reserved: reserved
+ */
struct iwl_wowlan_config_cmd {
__le32 wakeup_filter;
__le16 non_qos_seq;
@@ -370,6 +382,21 @@ struct iwl_wowlan_gtk_status {
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
+/**
+ * struct iwl_wowlan_status - WoWLAN status
+ * @gtk: GTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @wake_packet: wakeup packet
+ */
struct iwl_wowlan_status {
struct iwl_wowlan_gtk_status gtk;
__le64 replay_ctr;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
index 970b030ed28d..1d970bf0d735 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
@@ -199,6 +199,7 @@ struct iwl_mac_data_ibss {
* @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
* @listen_interval: in beacon intervals, applicable only when associated
* @assoc_id: unique ID assigned by the AP during association
+ * @assoc_beacon_arrive_time: TSF of first beacon after association
*/
struct iwl_mac_data_sta {
__le32 is_assoc;
@@ -329,17 +330,17 @@ struct iwl_ac_qos {
* ( MAC_CONTEXT_CMD = 0x28 )
* @id_and_color: ID and color of the MAC
* @action: action to perform, one of FW_CTXT_ACTION_*
- * @mac_type: one of FW_MAC_TYPE_*
- * @tsd_id: TSF HW timer, one of TSF_ID_*
+ * @mac_type: one of &enum iwl_mac_types
+ * @tsd_id: TSF HW timer, one of &enum iwl_tsf_id
* @node_addr: MAC address
* @bssid_addr: BSSID
* @cck_rates: basic rates available for CCK
* @ofdm_rates: basic rates available for OFDM
- * @protection_flags: combination of MAC_PROT_FLG_FLAG_*
+ * @protection_flags: combination of &enum iwl_mac_protection_flags
* @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
* @short_slot: 0x10 for enabling short slots, 0 otherwise
- * @filter_flags: combination of MAC_FILTER_*
- * @qos_flags: from MAC_QOS_FLG_*
+ * @filter_flags: combination of &enum iwl_mac_filter_flags
+ * @qos_flags: from &enum iwl_mac_qos_flags
* @ac: one iwl_mac_qos configuration for each AC
* @mac_specific: one of struct iwl_mac_data_*, according to mac_type
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
index 750510aff70b..9d87fddd29b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
@@ -82,6 +82,8 @@
* @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
* @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register
* @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
+ * @LTR_CFG_FLAG_UPDATE_VALUES: update config values and short
+ * idle timeout
*/
enum iwl_ltr_config_flags {
LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
@@ -91,11 +93,14 @@ enum iwl_ltr_config_flags {
LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
+ LTR_CFG_FLAG_UPDATE_VALUES = BIT(7),
};
/**
* struct iwl_ltr_config_cmd_v1 - configures the LTR
- * @flags: See %enum iwl_ltr_config_flags
+ * @flags: See &enum iwl_ltr_config_flags
+ * @static_long: static LTR Long register value.
+ * @static_short: static LTR Short register value.
*/
struct iwl_ltr_config_cmd_v1 {
__le32 flags;
@@ -107,11 +112,14 @@ struct iwl_ltr_config_cmd_v1 {
/**
* struct iwl_ltr_config_cmd - configures the LTR
- * @flags: See %enum iwl_ltr_config_flags
- * @static_long:
- * @static_short:
- * @ltr_cfg_values:
- * @ltr_short_idle_timeout:
+ * @flags: See &enum iwl_ltr_config_flags
+ * @static_long: static LTR Long register value.
+ * @static_short: static LTR Short register value.
+ * @ltr_cfg_values: LTR parameters table values (in usec) in folowing order:
+ * TX, RX, Short Idle, Long Idle. Used only if %LTR_CFG_FLAG_UPDATE_VALUES
+ * is set.
+ * @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if
+ * %LTR_CFG_FLAG_UPDATE_VALUES is set.
*/
struct iwl_ltr_config_cmd {
__le32 flags;
@@ -140,7 +148,7 @@ struct iwl_ltr_config_cmd {
* PBW Snoozing enabled
* @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
* @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
- * @POWER_FLAGS_AP_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
* detection enablement
*/
enum iwl_power_flags {
@@ -166,6 +174,7 @@ enum iwl_power_flags {
* Minimum allowed:- 3 * DTIM. Keep alive period must be
* set regardless of power scheme or current power state.
* FW use this value also when PM is disabled.
+ * @debug_flags: debug flags
* @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
* PSM transition - legacy PM
* @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
@@ -191,7 +200,8 @@ struct iwl_powertable_cmd {
/**
* enum iwl_device_power_flags - masks for device power command flags
- * @DEVIC_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
+ * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK:
+ * '1' Allow to save power by turning off
* receiver and transmitter. '0' - does not allow.
*/
enum iwl_device_power_flags {
@@ -202,7 +212,8 @@ enum iwl_device_power_flags {
* struct iwl_device_power_cmd - device wide power command.
* DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
*
- * @flags: Power table command flags from DEVICE_POWER_FLAGS_*
+ * @flags: Power table command flags from &enum iwl_device_power_flags
+ * @reserved: reserved (padding)
*/
struct iwl_device_power_cmd {
/* PM_POWER_TABLE_CMD_API_S_VER_6 */
@@ -213,7 +224,7 @@ struct iwl_device_power_cmd {
/**
* struct iwl_mac_power_cmd - New power command containing uAPSD support
* MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
- * @id_and_color: MAC contex identifier
+ * @id_and_color: MAC contex identifier, &enum iwl_mvm_id_and_color
* @flags: Power table command flags from POWER_FLAGS_*
* @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
* Minimum allowed:- 3 * DTIM. Keep alive period must be
@@ -223,7 +234,6 @@ struct iwl_device_power_cmd {
* PSM transition - legacy PM
* @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
* PSM transition - legacy PM
- * @sleep_interval: not in use
* @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
* is set. For example, if it is required to skip over
* one DTIM, this value need to be set to 2 (DTIM periods).
@@ -233,7 +243,6 @@ struct iwl_device_power_cmd {
* PSM transition - uAPSD
* @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
* Default: 80dbm
- * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
* @snooze_interval: Maximum time between attempts to retrieve buffered data
* from the AP [msec]
* @snooze_window: A window of time in which PBW snoozing insures that all
@@ -251,8 +260,9 @@ struct iwl_device_power_cmd {
* @heavy_rx_thld_packets: RX threshold measured in number of packets
* @heavy_tx_thld_percentage: TX threshold measured in load's percentage
* @heavy_rx_thld_percentage: RX threshold measured in load's percentage
- * @limited_ps_threshold:
-*/
+ * @limited_ps_threshold: (unused)
+ * @reserved: reserved (padding)
+ */
struct iwl_mac_power_cmd {
/* CONTEXT_DESC_API_T_VER_1 */
__le32 id_and_color;
@@ -343,6 +353,7 @@ struct iwl_dev_tx_power_cmd_v3 {
* @v3: version 3 of the command, embedded here for easier software handling
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
+ * @reserved: reserved (padding)
*/
struct iwl_dev_tx_power_cmd {
/* v4 is just an extension of v3 - keep this here */
@@ -393,7 +404,6 @@ struct iwl_geo_tx_power_profiles_cmd {
/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
- * @id_and_color: MAC contex identifier
* @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
* to driver if delta in Energy values calculated for this and last
* passed beacon is greater than this threshold. Zero value means that
@@ -411,7 +421,7 @@ struct iwl_geo_tx_power_profiles_cmd {
* Threshold. Typical energy threshold is -72dBm.
* @bf_temp_threshold: This threshold determines the type of temperature
* filtering (Slow or Fast) that is selected (Units are in Celsuis):
- * If the current temperature is above this threshold - Fast filter
+ * If the current temperature is above this threshold - Fast filter
* will be used, If the current temperature is below this threshold -
* Slow filter will be used.
* @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
@@ -425,7 +435,8 @@ struct iwl_geo_tx_power_profiles_cmd {
* beacon filtering; beacons will not be forced to be sent to driver
* regardless of whether its temerature has been changed.
* @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
- * @bf_filter_escape_timer: Send beacons to to driver if no beacons were passed
+ * @bf_debug_flag: beacon filtering debug configuration
+ * @bf_escape_timer: Send beacons to to driver if no beacons were passed
* for a specific period of time. Units: Beacons.
* @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
* for a longer period of time then this escape-timeout. Units: Beacons.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
index a10c6aae9ab9..1e34e41f52bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
@@ -392,7 +392,7 @@ enum {
struct iwl_lq_cmd {
u8 sta_id;
u8 reduced_tpc;
- u16 control;
+ __le16 control;
/* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
u8 flags;
u8 mimo_delim;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
index b530fa47d68a..ad7ab6dd86cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -147,7 +147,8 @@ enum iwl_csum_rx_assist_info {
/**
* struct iwl_rx_mpdu_res_start - phy info
- * @assist: see CSUM_RX_ASSIST_ above
+ * @byte_count: byte count of the frame
+ * @assist: see &enum iwl_csum_rx_assist_info
*/
struct iwl_rx_mpdu_res_start {
__le16 byte_count;
@@ -348,35 +349,106 @@ enum iwl_rx_mpdu_mac_info {
IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
};
+/**
+ * struct iwl_rx_mpdu_desc - RX MPDU descriptor
+ */
struct iwl_rx_mpdu_desc {
/* DW2 */
+ /**
+ * @mpdu_len: MPDU length
+ */
__le16 mpdu_len;
+ /**
+ * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1
+ */
u8 mac_flags1;
+ /**
+ * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2
+ */
u8 mac_flags2;
/* DW3 */
+ /**
+ * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info
+ */
u8 amsdu_info;
+ /**
+ * @phy_info: &enum iwl_rx_mpdu_phy_info
+ */
__le16 phy_info;
+ /**
+ * @mac_phy_idx: MAC/PHY index
+ */
u8 mac_phy_idx;
/* DW4 - carries csum data only when rpa_en == 1 */
- __le16 raw_csum; /* alledgedly unreliable */
+ /**
+ * @raw_csum: raw checksum (alledgedly unreliable)
+ */
+ __le16 raw_csum;
+ /**
+ * @l3l4_flags: &enum iwl_rx_l3l4_flags
+ */
__le16 l3l4_flags;
/* DW5 */
+ /**
+ * @status: &enum iwl_rx_mpdu_status
+ */
__le16 status;
+ /**
+ * @hash_filter: hash filter value
+ */
u8 hash_filter;
+ /**
+ * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags
+ */
u8 sta_id_flags;
/* DW6 */
+ /**
+ * @reorder_data: &enum iwl_rx_mpdu_reorder_data
+ */
__le32 reorder_data;
/* DW7 - carries rss_hash only when rpa_en == 1 */
+ /**
+ * @rss_hash: RSS hash value
+ */
__le32 rss_hash;
/* DW8 - carries filter_match only when rpa_en == 1 */
+ /**
+ * @filter_match: filter match value
+ */
__le32 filter_match;
/* DW9 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
__le32 rate_n_flags;
/* DW10 */
- u8 energy_a, energy_b, channel, mac_context;
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
/* DW11 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
__le32 gp2_on_air_rise;
- /* DW12 & DW13 - carries TSF only TSF_OVERLOAD bit == 0 */
+ /* DW12 & DW13 */
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
__le64 tsf_on_air_rise;
} __packed;
@@ -447,7 +519,7 @@ struct iwl_rxq_sync_notification {
} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
/**
- * Internal message identifier
+ * enum iwl_mvm_rxq_notif_type - Internal message identifier
*
* @IWL_MVM_RXQ_EMPTY: empty sync notification
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
@@ -491,7 +563,7 @@ enum iwl_mvm_pm_event {
/**
* struct iwl_mvm_pm_state_notification - station PM state notification
* @sta_id: station ID of the station changing state
- * @type: the new powersave state, see IWL_MVM_PM_EVENT_ above
+ * @type: the new powersave state, see &enum iwl_mvm_pm_event
*/
struct iwl_mvm_pm_state_notification {
u8 sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
index 3178eb96e395..f8e92026bdc6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -67,8 +68,6 @@
#ifndef __fw_api_scan_h__
#define __fw_api_scan_h__
-#include "fw-api.h"
-
/* Scan Commands, Responses, Notifications */
/* Max number of IEs for direct SSID scans in a command */
@@ -111,7 +110,7 @@ enum scan_framework_client {
};
/**
- * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
+ * struct iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
* @ssid: MAC address to filter out
* @reported_rssi: AP rssi reported to the host
* @client_bitmap: clients ignore this entry - enum scan_framework_client
@@ -135,7 +134,7 @@ enum iwl_scan_offload_band_selection {
};
/**
- * iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
+ * struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
* @ssid_index: index to ssid list in fixed part
* @unicast_cipher: encryption algorithm to match - bitmap
* @aut_alg: authentication algorithm to match - bitmap
@@ -154,8 +153,7 @@ struct iwl_scan_offload_profile {
} __packed;
/**
- * iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
- * @blaclist: AP list to filter off from scan results
+ * struct iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
* @profiles: profiles to search for match
* @blacklist_len: length of blacklist
* @num_profiles: num of profiles in the list
@@ -176,7 +174,7 @@ struct iwl_scan_offload_profile_cfg {
} __packed;
/**
- * iwl_scan_schedule_lmac - schedule of scan offload
+ * struct iwl_scan_schedule_lmac - schedule of scan offload
* @delay: delay between iterations, in seconds.
* @iterations: num of scan iterations
* @full_scan_mul: number of partial scans before each full scan
@@ -200,7 +198,7 @@ enum iwl_scan_ebs_status {
};
/**
- * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
+ * struct iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
* @tx_flags: combination of TX_CMD_FLG_*
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
@@ -220,7 +218,7 @@ enum iwl_scan_channel_flags_lmac {
};
/**
- * iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
+ * struct iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
* @flags: bits 1-20: directed scan to i'th ssid
* other bits &enum iwl_scan_channel_flags_lmac
* @channel_number: channel number 1-13 etc
@@ -235,7 +233,7 @@ struct iwl_scan_channel_cfg_lmac {
} __packed;
/*
- * iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
+ * struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
* @offset: offset in the data block
* @len: length of the segment
*/
@@ -263,7 +261,7 @@ enum iwl_scan_channel_flags {
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
};
-/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
+/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
* @flags: enum iwl_scan_channel_flags
* @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
* involved.
@@ -276,7 +274,7 @@ struct iwl_scan_channel_opt {
} __packed;
/**
- * iwl_mvm_lmac_scan_flags
+ * enum iwl_mvm_lmac_scan_flags - LMAC scan flags
* @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses
* without filtering.
* @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels
@@ -320,7 +318,7 @@ enum iwl_scan_priority_ext {
};
/**
- * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
+ * struct iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
* @reserved1: for alignment and future use
* @channel_num: num of channels to scan
* @active-dwell: dwell time for active channels
@@ -410,7 +408,7 @@ struct iwl_lmac_scan_complete_notif {
} __packed;
/**
- * iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
* @status: enum iwl_scan_offload_complete_status
@@ -547,12 +545,12 @@ struct iwl_scan_config {
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
/**
- * iwl_umac_scan_flags
- *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
+ * enum iwl_umac_scan_flags - UMAC scan flags
+ * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
* can be preempted by other scan requests with higher priority.
* The low priority scan will be resumed when the higher proirity scan is
* completed.
- *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
+ * @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
* when scan starts.
*/
enum iwl_umac_scan_flags {
@@ -771,7 +769,7 @@ struct iwl_scan_offload_profiles_query {
* @last_channel: last channel that was scanned
* @start_tsf: TSF timer in usecs of the scan start time for the mac specified
* in &struct iwl_scan_req_umac.
- * @results: array of scan results, only "scanned_channels" of them are valid
+ * @results: array of scan results, length in @scanned_channels
*/
struct iwl_umac_scan_iter_complete_notif {
__le32 uid;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index 421b9dd1fb66..c14ebd7ff77d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -197,7 +197,15 @@ enum iwl_sta_modify_flag {
STA_MODIFY_QUEUES = BIT(7),
};
-#define STA_MODE_MODIFY 1
+/**
+ * enum iwl_sta_mode - station command mode
+ * @STA_MODE_ADD: add new station
+ * @STA_MODE_MODIFY: modify the station
+ */
+enum iwl_sta_mode {
+ STA_MODE_ADD = 0,
+ STA_MODE_MODIFY = 1,
+};
/**
* enum iwl_sta_sleep_flag - type of sleep of the station
@@ -223,7 +231,7 @@ enum iwl_sta_sleep_flag {
/**
* struct iwl_mvm_keyinfo - key information
- * @key_flags: type %iwl_sta_key_flag
+ * @key_flags: type &enum iwl_sta_key_flag
* @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
* @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
* @key_offset: key offset in the fw's key table
@@ -253,17 +261,19 @@ struct iwl_mvm_keyinfo {
/**
* struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
- * @add_modify: 1: modify existing, 0: add new station
+ * @add_modify: see &enum iwl_sta_mode
* @awake_acs:
* @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
* AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
- * @mac_id_n_color: the Mac context this station belongs to
- * @addr[ETH_ALEN]: station's MAC address
+ * @mac_id_n_color: the Mac context this station belongs to,
+ * see &enum iwl_mvm_id_and_color
+ * @addr: station's MAC address
* @sta_id: index of station in uCode's station table
* @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
* alone. 1 - modify, 0 - don't change.
- * @station_flags: look at %iwl_sta_flags
- * @station_flags_msk: what of %station_flags have changed
+ * @station_flags: look at &enum iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed,
+ * also &enum iwl_sta_flags
* @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
* Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
* add_immediate_ba_ssn.
@@ -274,7 +284,7 @@ struct iwl_mvm_keyinfo {
* @sleep_tx_count: number of packets to transmit to station even though it is
* asleep. Used to synchronise PS-poll and u-APSD responses while ucode
* keeps track of STA sleep state.
- * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
+ * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag.
* @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
* mac-addr.
* @beamform_flags: beam forming controls
@@ -330,17 +340,19 @@ enum iwl_sta_type {
/**
* struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
- * @add_modify: 1: modify existing, 0: add new station
+ * @add_modify: see &enum iwl_sta_mode
* @awake_acs:
* @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
* AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
- * @mac_id_n_color: the Mac context this station belongs to
- * @addr[ETH_ALEN]: station's MAC address
+ * @mac_id_n_color: the Mac context this station belongs to,
+ * see &enum iwl_mvm_id_and_color
+ * @addr: station's MAC address
* @sta_id: index of station in uCode's station table
* @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
* alone. 1 - modify, 0 - don't change.
- * @station_flags: look at %iwl_sta_flags
- * @station_flags_msk: what of %station_flags have changed
+ * @station_flags: look at &enum iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed,
+ * also &enum iwl_sta_flags
* @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
* Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
* add_immediate_ba_ssn.
@@ -352,7 +364,7 @@ enum iwl_sta_type {
* asleep. Used to synchronise PS-poll and u-APSD responses while ucode
* keeps track of STA sleep state.
* @station_type: type of this station. See &enum iwl_sta_type.
- * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
+ * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag.
* @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
* mac-addr.
* @beamform_flags: beam forming controls
@@ -401,7 +413,7 @@ struct iwl_mvm_add_sta_cmd {
* ( REPLY_ADD_STA_KEY = 0x17 )
* @sta_id: index of station in uCode's station table
* @key_offset: key offset in key storage
- * @key_flags: type %iwl_sta_key_flag
+ * @key_flags: type &enum iwl_sta_key_flag
* @key: key material data
* @rx_secur_seq_cnt: RX security sequence counter for the key
*/
@@ -468,7 +480,7 @@ struct iwl_mvm_rm_sta_cmd {
/**
* struct iwl_mvm_mgmt_mcast_key_cmd_v1
* ( MGMT_MCAST_KEY = 0x1f )
- * @ctrl_flags: %iwl_sta_key_flag
+ * @ctrl_flags: &enum iwl_sta_key_flag
* @igtk:
* @k1: unused
* @k2: unused
@@ -489,7 +501,7 @@ struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
/**
* struct iwl_mvm_mgmt_mcast_key_cmd
* ( MGMT_MCAST_KEY = 0x1f )
- * @ctrl_flags: %iwl_sta_key_flag
+ * @ctrl_flags: &enum iwl_sta_key_flag
* @igtk: IGTK master key
* @sta_id: station ID that support IGTK
* @key_id:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
index 6371c342b96d..4286222f54f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -72,7 +72,7 @@ struct mvm_statistics_dbg {
__le32 burst_check;
__le32 burst_count;
__le32 wait_for_silence_timeout_cnt;
- __le32 reserved[3];
+ u8 reserved[12];
} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
struct mvm_statistics_div {
@@ -323,9 +323,30 @@ struct iwl_notif_statistics_cdb {
struct mvm_statistics_load_cdb load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_12 */
-#define IWL_STATISTICS_FLG_CLEAR 0x1
-#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2
+/**
+ * enum iwl_statistics_notif_flags - flags used in statistics notification
+ * @IWL_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report
+ */
+enum iwl_statistics_notif_flags {
+ IWL_STATISTICS_REPLY_FLG_CLEAR = 0x1,
+};
+/**
+ * enum iwl_statistics_cmd_flags - flags used in statistics command
+ * @IWL_STATISTICS_FLG_CLEAR: request to clear statistics after the report
+ * that's sent after this command
+ * @IWL_STATISTICS_FLG_DISABLE_NOTIF: disable unilateral statistics
+ * notifications
+ */
+enum iwl_statistics_cmd_flags {
+ IWL_STATISTICS_FLG_CLEAR = 0x1,
+ IWL_STATISTICS_FLG_DISABLE_NOTIF = 0x2,
+};
+
+/**
+ * struct iwl_statistics_cmd - statistics config command
+ * @flags: flags from &enum iwl_statistics_cmd_flags
+ */
struct iwl_statistics_cmd {
__le32 flags;
} __packed; /* STATISTICS_CMD_API_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h
index 86aa51b2210e..e2acf39f784d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h
@@ -63,8 +63,6 @@
#ifndef __fw_api_tof_h__
#define __fw_api_tof_h__
-#include "fw-api.h"
-
/* ToF sub-group command IDs */
enum iwl_mvm_tof_sub_grp_ids {
TOF_RANGE_REQ_CMD = 0x1,
@@ -118,11 +116,17 @@ struct iwl_tof_config_cmd {
* @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
* @rate: current AP rate
* @ctrl_ch_position: coding of the control channel position relative to
- * the center frequency.
- * 40MHz 0 below center, 1 above center
- * 80MHz bits [0..1]: 0 the near 20MHz to the center,
- * 1 the far 20MHz to the center
- * bit[2] as above 40MHz
+ * the center frequency:
+ *
+ * 40 MHz
+ * 0 below center, 1 above center
+ *
+ * 80 MHz
+ * bits [0..1]
+ * * 0 the near 20MHz to the center,
+ * * 1 the far 20MHz to the center
+ * bit[2]
+ * as above 40MHz
* @ftm_per_burst: FTMs per Burst
* @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
* '1' - we measure over the Initial FTM Response
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 1360ebfdc51b..a37c584b0b48 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -74,6 +74,7 @@
* Otherwise, use rate_n_flags from the TX command
* @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
* Must set TX_CMD_FLG_ACK with this flag.
+ * @TX_CMD_FLG_TXOP_PROT: TXOP protection requested
* @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
* @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
* @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
@@ -177,29 +178,6 @@ enum iwl_tx_cmd_sec_ctrl {
TX_CMD_SEC_KEY_FROM_TABLE = 0x10,
};
-/* TODO: how does these values are OK with only 16 bit variable??? */
-/*
- * TX command next frame info
- *
- * bits 0:2 - security control (TX_CMD_SEC_*)
- * bit 3 - immediate ACK required
- * bit 4 - rate is taken from STA table
- * bit 5 - frame belongs to BA stream
- * bit 6 - immediate BA response expected
- * bit 7 - unused
- * bits 8:15 - Station ID
- * bits 16:31 - rate
- */
-#define TX_CMD_NEXT_FRAME_ACK_MSK (0x8)
-#define TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)
-#define TX_CMD_NEXT_FRAME_BA_MSK (0x20)
-#define TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)
-#define TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)
-#define TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)
-#define TX_CMD_NEXT_FRAME_STA_ID_POS (8)
-#define TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)
-#define TX_CMD_NEXT_FRAME_RATE_POS (16)
-
/*
* TX command Frame life time in us - to be written in pm_frame_timeout
*/
@@ -224,7 +202,7 @@ enum iwl_tx_cmd_sec_ctrl {
/**
* enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values
- * @TX_CMD_OFFLD_IP_HDR_OFFSET: offset to start of IP header (in words)
+ * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words)
* from mac header end. For normal case it is 4 words for SNAP.
* note: tx_cmd, mac header and pad are not counted in the offset.
* This is used to help the offload in case there is tunneling such as
@@ -258,22 +236,27 @@ enum iwl_tx_offload_assist_flags_pos {
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
* @tx_flags: combination of TX_CMD_FLG_*
+ * @scratch: scratch buffer used by the device
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
* @sta_id: index of destination station in FW station table
* @sec_ctl: security control, TX_CMD_SEC_*
* @initial_rate_index: index into the the rate table for initial TX attempt.
* Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
+ * @reserved2: reserved
* @key: security key
- * @next_frame_flags: TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_*
+ * @reserved3: reserved
* @life_time: frame life time (usecs??)
* @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
* btkill_cnd + reserved), first 32 bits. "0" disables usage.
* @dram_msb_ptr: upper bits of the scratch physical address
* @rts_retry_limit: max attempts for RTS
* @data_retry_limit: max attempts to send the data packet
- * @tid_spec: TID/tspec
+ * @tid_tspec: TID/tspec
* @pm_frame_timeout: PM TX frame timeout
+ * @reserved4: reserved
+ * @payload: payload (same as @hdr)
+ * @hdr: 802.11 header (same as @payload)
*
* The byte count (both len and next_frame_len) includes MAC header
* (24/26/30/32 bytes)
@@ -327,10 +310,11 @@ struct iwl_dram_sec_info {
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
- * @tx_flags: combination of &iwl_tx_cmd_flags
+ * @flags: combination of &enum iwl_tx_cmd_flags
* @dram_info: FW internal DRAM storage
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
+ * @hdr: 802.11 header
*/
struct iwl_tx_cmd_gen2 {
__le16 len;
@@ -504,7 +488,7 @@ enum iwl_tx_agg_status {
/**
* struct agg_tx_status - per packet TX aggregation status
- * @status: enum iwl_tx_agg_status
+ * @status: See &enum iwl_tx_agg_status
* @sequence: Sequence # for this frame's Tx cmd (not SSN!)
*/
struct agg_tx_status {
@@ -529,7 +513,7 @@ struct agg_tx_status {
#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
/**
- * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
+ * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet
* ( REPLY_TX = 0x1c )
* @frame_count: 1 no aggregation, >1 aggregation
* @bt_kill_count: num of times blocked by bluetooth (unused for agg)
@@ -545,6 +529,8 @@ struct agg_tx_status {
* @pa_integ_res_b: tx power info
* @pa_integ_res_c: tx power info
* @measurement_req_id: tx power info
+ * @reduced_tpc: transmit power reduction used
+ * @reserved: reserved
* @tfd_info: TFD information set by the FH
* @seq_ctl: sequence control from the Tx cmd
* @byte_cnt: byte count from the Tx cmd
@@ -554,7 +540,63 @@ struct agg_tx_status {
* @tx_queue: TX queue for this response
* @status: for non-agg: frame status TX_STATUS_*
* for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
- * follow this one, up to frame_count.
+ * follow this one, up to frame_count. Length in @frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwl_mvm_get_scd_ssn for more details.
+ */
+struct iwl_mvm_tx_resp_v3 {
+ u8 frame_count;
+ u8 bt_kill_count;
+ u8 failure_rts;
+ u8 failure_frame;
+ __le32 initial_rate;
+ __le16 wireless_media_time;
+
+ u8 pa_status;
+ u8 pa_integ_res_a[3];
+ u8 pa_integ_res_b[3];
+ u8 pa_integ_res_c[3];
+ __le16 measurement_req_id;
+ u8 reduced_tpc;
+ u8 reserved;
+
+ __le32 tfd_info;
+ __le16 seq_ctl;
+ __le16 byte_cnt;
+ u8 tlc_info;
+ u8 ra_tid;
+ __le16 frame_ctrl;
+ struct agg_tx_status status[];
+} __packed; /* TX_RSP_API_S_VER_3 */
+
+/**
+ * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @reduced_tpc: transmit power reduction used
+ * @reserved: reserved
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
+ * @status: for non-agg: frame status TX_STATUS_*
* For version 6 TX response isn't received for aggregation at all.
*
* After the array of statuses comes the SSN of the SCD. Look at
@@ -582,26 +624,19 @@ struct iwl_mvm_tx_resp {
u8 tlc_info;
u8 ra_tid;
__le16 frame_ctrl;
- union {
- struct {
- struct agg_tx_status status;
- } v3;/* TX_RSP_API_S_VER_3 */
- struct {
- __le16 tx_queue;
- __le16 reserved2;
- struct agg_tx_status status;
- } v6;
- };
+ __le16 tx_queue;
+ __le16 reserved2;
+ struct agg_tx_status status;
} __packed; /* TX_RSP_API_S_VER_6 */
/**
* struct iwl_mvm_ba_notif - notifies about reception of BA
* ( BA_NOTIF = 0xc5 )
- * @sta_addr_lo32: lower 32 bits of the MAC address
- * @sta_addr_hi16: upper 16 bits of the MAC address
+ * @sta_addr: MAC address
+ * @reserved: reserved
* @sta_id: Index of recipient (BA-sending) station in fw's station table
* @tid: tid of the session
- * @seq_ctl:
+ * @seq_ctl: sequence control field
* @bitmap: the bitmap of the BA notification as seen in the air
* @scd_flow: the tx queue this BA relates to
* @scd_ssn: the index of the last contiguously sent packet
@@ -610,10 +645,10 @@ struct iwl_mvm_tx_resp {
* @reduced_txp: power reduced according to TPC. This is the actual value and
* not a copy from the LQ command. Thus, if not the first rate was used
* for Tx-ing then this value will be set to 0 by FW.
+ * @reserved1: reserved
*/
struct iwl_mvm_ba_notif {
- __le32 sta_addr_lo32;
- __le16 sta_addr_hi16;
+ u8 sta_addr[ETH_ALEN];
__le16 reserved;
u8 sta_id;
@@ -633,13 +668,13 @@ struct iwl_mvm_ba_notif {
* @q_num: TFD queue number
* @tfd_index: Index of first un-acked frame in the TFD queue
* @scd_queue: For debug only - the physical queue the TFD queue is bound to
+ * @reserved: reserved for alignment
*/
struct iwl_mvm_compressed_ba_tfd {
__le16 q_num;
__le16 tfd_index;
u8 scd_queue;
- u8 reserved;
- __le16 reserved2;
+ u8 reserved[3];
} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
/**
@@ -687,11 +722,12 @@ enum iwl_mvm_ba_resp_flags {
* @query_frame_cnt: SCD query frame count
* @txed: number of frames sent in the aggregation (all-TIDs)
* @done: number of frames that were Acked by the BA (all-TIDs)
+ * @reserved: reserved (for alignment)
* @wireless_time: Wireless-media time
* @tx_rate: the rate the aggregation was sent at
* @tfd_cnt: number of TFD-Q elements
* @ra_tid_cnt: number of RATID-Q elements
- * @ba_tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
+ * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
* for details.
* @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
* &iwl_mvm_compressed_ba_ratid for more details.
@@ -765,6 +801,7 @@ struct iwl_mac_beacon_cmd_v7 {
* struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
* @byte_cnt: byte count of the beacon frame
* @flags: for future use
+ * @reserved: reserved
* @data: see &iwl_mac_beacon_cmd_data
*/
struct iwl_mac_beacon_cmd {
@@ -809,12 +846,24 @@ enum iwl_dump_control {
* @flush_ctl: control flags
* @reserved: reserved
*/
-struct iwl_tx_path_flush_cmd {
+struct iwl_tx_path_flush_cmd_v1 {
__le32 queues_ctl;
__le16 flush_ctl;
__le16 reserved;
} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
+/**
+ * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
+ * @sta_id: station ID to flush
+ * @tid_mask: TID mask to flush
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd {
+ __le32 sta_id;
+ __le16 tid_mask;
+ __le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
+
/* Available options for the SCD_QUEUE_CFG HCMD */
enum iwl_scd_cfg_actions {
SCD_CFG_DISABLE_QUEUE = 0x0,
@@ -824,16 +873,17 @@ enum iwl_scd_cfg_actions {
/**
* struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
- * @token:
+ * @token: unused
* @sta_id: station id
- * @tid:
+ * @tid: TID
* @scd_queue: scheduler queue to confiug
* @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
- * Value is one of %iwl_scd_cfg_actions options
+ * Value is one of &enum iwl_scd_cfg_actions options
* @aggregate: 1 aggregated queue, 0 otherwise
- * @tx_fifo: %enum iwl_mvm_tx_fifo
+ * @tx_fifo: &enum iwl_mvm_tx_fifo
* @window: BA window size
* @ssn: SSN for the BA agreement
+ * @reserved: reserved
*/
struct iwl_scd_txq_cfg_cmd {
u8 token;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index f545c5f9e4e3..b84e8ddbbbc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -130,42 +130,116 @@ enum iwl_mvm_tx_fifo {
};
-/* commands */
-enum {
+/**
+ * enum iwl_legacy_cmds - legacy group command IDs
+ */
+enum iwl_legacy_cmds {
+ /**
+ * @MVM_ALIVE:
+ * Alive data from the firmware, as described in
+ * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp.
+ */
MVM_ALIVE = 0x1,
+
+ /**
+ * @REPLY_ERROR: Cause an error in the firmware, for testing purposes.
+ */
REPLY_ERROR = 0x2,
+
+ /**
+ * @ECHO_CMD: Send data to the device to have it returned immediately.
+ */
ECHO_CMD = 0x3,
+ /**
+ * @INIT_COMPLETE_NOTIF: Notification that initialization is complete.
+ */
INIT_COMPLETE_NOTIF = 0x4,
- /* PHY context commands */
+ /**
+ * @PHY_CONTEXT_CMD:
+ * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd.
+ */
PHY_CONTEXT_CMD = 0x8,
+
+ /**
+ * @DBG_CFG: Debug configuration command.
+ */
DBG_CFG = 0x9,
+
+ /**
+ * @ANTENNA_COUPLING_NOTIFICATION:
+ * Antenna coupling data, &struct iwl_mvm_antenna_coupling_notif
+ */
ANTENNA_COUPLING_NOTIFICATION = 0xa,
- /* UMAC scan commands */
+ /**
+ * @SCAN_ITERATION_COMPLETE_UMAC:
+ * Firmware indicates a scan iteration completed, using
+ * &struct iwl_umac_scan_iter_complete_notif.
+ */
SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
+
+ /**
+ * @SCAN_CFG_CMD:
+ * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config
+ */
SCAN_CFG_CMD = 0xc,
SCAN_REQ_UMAC = 0xd,
SCAN_ABORT_UMAC = 0xe,
+
+ /**
+ * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete
+ */
SCAN_COMPLETE_UMAC = 0xf,
BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
- /* station table */
+ /**
+ * @ADD_STA_KEY:
+ * &struct iwl_mvm_add_sta_key_cmd_v1 or
+ * &struct iwl_mvm_add_sta_key_cmd.
+ */
ADD_STA_KEY = 0x17,
+
+ /**
+ * @ADD_STA:
+ * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7.
+ */
ADD_STA = 0x18,
+ /**
+ * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd
+ */
REMOVE_STA = 0x19,
/* paging get item */
FW_GET_ITEM_CMD = 0x1a,
- /* TX */
+ /**
+ * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2,
+ * response in &struct iwl_mvm_tx_resp or
+ * &struct iwl_mvm_tx_resp_v3
+ */
TX_CMD = 0x1c,
+
+ /**
+ * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd
+ */
TXPATH_FLUSH = 0x1e,
+
+ /**
+ * @MGMT_MCAST_KEY:
+ * &struct iwl_mvm_mgmt_mcast_key_cmd or
+ * &struct iwl_mvm_mgmt_mcast_key_cmd_v1
+ */
MGMT_MCAST_KEY = 0x1f,
/* scheduler config */
+ /**
+ * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
+ * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
+ * for newer (A000) hardware.
+ */
SCD_QUEUE_CFG = 0x1d,
/* global key */
@@ -179,17 +253,40 @@ enum {
TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
TDLS_CONFIG_CMD = 0xa7,
- /* MAC and Binding commands */
+ /**
+ * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd
+ */
MAC_CONTEXT_CMD = 0x28,
+
+ /**
+ * @TIME_EVENT_CMD:
+ * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp
+ */
TIME_EVENT_CMD = 0x29, /* both CMD and response */
+ /**
+ * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif
+ */
TIME_EVENT_NOTIFICATION = 0x2a,
+ /**
+ * @BINDING_CONTEXT_CMD:
+ * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1
+ */
BINDING_CONTEXT_CMD = 0x2b,
+ /**
+ * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd
+ */
TIME_QUOTA_CMD = 0x2c,
NON_QOS_TX_COUNTER_CMD = 0x2d,
+ /**
+ * @LQ_CMD: using &struct iwl_lq_cmd
+ */
LQ_CMD = 0x4e,
- /* paging block to FW cpu2 */
+ /**
+ * @FW_PAGING_BLOCK_CMD:
+ * &struct iwl_fw_paging_cmd or &struct iwl_fw_paging_cmd_v1
+ */
FW_PAGING_BLOCK_CMD = 0x4f,
/* Scan offload */
@@ -203,6 +300,9 @@ enum {
SCAN_ITERATION_COMPLETE = 0xe7,
/* Phy */
+ /**
+ * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd
+ */
PHY_CONFIGURATION_CMD = 0x6a,
CALIB_RES_NOTIF_PHY_DB = 0x6b,
PHY_DB_CMD = 0x6c,
@@ -211,7 +311,9 @@ enum {
TOF_CMD = 0x10,
TOF_NOTIFICATION = 0x11,
- /* Power - legacy power table command */
+ /**
+ * @POWER_TABLE_CMD: &struct iwl_device_power_cmd
+ */
POWER_TABLE_CMD = 0x77,
PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
LTR_CONFIG = 0xee,
@@ -222,17 +324,44 @@ enum {
/* Set/Get DC2DC frequency tune */
DC2DC_CONFIG_CMD = 0x83,
- /* NVM */
+ /**
+ * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
+ */
NVM_ACCESS_CMD = 0x88,
SET_CALIB_DEFAULT_CMD = 0x8e,
BEACON_NOTIFICATION = 0x90,
+ /**
+ * @BEACON_TEMPLATE_CMD:
+ * Uses one of &struct iwl_mac_beacon_cmd_v6,
+ * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd
+ * depending on the device version.
+ */
BEACON_TEMPLATE_CMD = 0x91,
+ /**
+ * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd
+ */
TX_ANT_CONFIGURATION_CMD = 0x98,
+
+ /**
+ * @STATISTICS_CMD: &struct iwl_statistics_cmd
+ */
STATISTICS_CMD = 0x9c,
+
+ /**
+ * @STATISTICS_NOTIFICATION:
+ * one of &struct iwl_notif_statistics_v10,
+ * &struct iwl_notif_statistics_v11,
+ * &struct iwl_notif_statistics_cdb
+ */
STATISTICS_NOTIFICATION = 0x9d,
EOSP_NOTIFICATION = 0x9e,
+
+ /**
+ * @REDUCE_TX_POWER_CMD:
+ * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd
+ */
REDUCE_TX_POWER_CMD = 0x9f,
/* RF-KILL commands and notifications */
@@ -241,20 +370,43 @@ enum {
MISSED_BEACONS_NOTIFICATION = 0xa2,
- /* Power - new power table command */
+ /**
+ * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd
+ */
MAC_PM_POWER_TABLE = 0xa9,
+ /**
+ * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif
+ */
MFUART_LOAD_NOTIFICATION = 0xb1,
+ /**
+ * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd
+ */
RSS_CONFIG_CMD = 0xb3,
+ /**
+ * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info
+ */
REPLY_RX_PHY_CMD = 0xc0,
+
+ /**
+ * @REPLY_RX_MPDU_CMD:
+ * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc
+ */
REPLY_RX_MPDU_CMD = 0xc1,
FRAME_RELEASE = 0xc3,
BA_NOTIF = 0xc5,
/* Location Aware Regulatory */
+ /**
+ * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd
+ */
MCC_UPDATE_CMD = 0xc8,
+
+ /**
+ * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif
+ */
MCC_CHUB_UPDATE_CMD = 0xc9,
MARKER_CMD = 0xcb,
@@ -262,14 +414,29 @@ enum {
/* BT Coex */
BT_COEX_PRIO_TABLE = 0xcc,
BT_COEX_PROT_ENV = 0xcd,
+ /**
+ * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif
+ */
BT_PROFILE_NOTIFICATION = 0xce,
+ /**
+ * @BT_CONFIG: &struct iwl_bt_coex_cmd
+ */
BT_CONFIG = 0x9b,
BT_COEX_UPDATE_SW_BOOST = 0x5a,
BT_COEX_UPDATE_CORUN_LUT = 0x5b,
BT_COEX_UPDATE_REDUCED_TXP = 0x5c,
+ /**
+ * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd
+ */
BT_COEX_CI = 0x5d,
+ /**
+ * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd
+ */
REPLY_SF_CFG_CMD = 0xd1,
+ /**
+ * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd
+ */
REPLY_BEACON_FILTERING_CMD = 0xd2,
/* DTS measurements */
@@ -283,19 +450,39 @@ enum {
BCAST_FILTER_CMD = 0xcf,
MCAST_FILTER_CMD = 0xd0,
- /* D3 commands/notifications */
+ /**
+ * @D3_CONFIG_CMD: &struct iwl_d3_manager_config
+ */
D3_CONFIG_CMD = 0xd3,
PROT_OFFLOAD_CONFIG_CMD = 0xd4,
OFFLOADS_QUERY_CMD = 0xd5,
REMOTE_WAKE_CONFIG_CMD = 0xd6,
D0I3_END_CMD = 0xed,
- /* for WoWLAN in particular */
+ /**
+ * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd
+ */
WOWLAN_PATTERNS = 0xe0,
+
+ /**
+ * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd
+ */
WOWLAN_CONFIGURATION = 0xe1,
+ /**
+ * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd
+ */
WOWLAN_TSC_RSC_PARAM = 0xe2,
+ /**
+ * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd
+ */
WOWLAN_TKIP_PARAM = 0xe3,
+ /**
+ * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd
+ */
WOWLAN_KEK_KCK_MATERIAL = 0xe4,
+ /**
+ * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status
+ */
WOWLAN_GET_STATUSES = 0xe5,
WOWLAN_TX_POWER_PER_DB = 0xe6,
@@ -303,8 +490,6 @@ enum {
SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD = 0x58,
SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD = 0x59,
-
- REPLY_MAX = 0xff,
};
/* Please keep this enum *SORTED* by hex value.
@@ -316,21 +501,42 @@ enum iwl_mac_conf_subcmd_ids {
CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
};
+/**
+ * enum iwl_phy_ops_subcmd_ids - PHY group commands
+ */
enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
CTDP_CONFIG_CMD = 0x03,
+
+ /**
+ * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd
+ */
TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
GEO_TX_POWER_LIMIT = 0x05,
CT_KILL_NOTIFICATION = 0xFE,
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
+/**
+ * enum iwl_system_subcmd_ids - system group command IDs
+ */
enum iwl_system_subcmd_ids {
+ /**
+ * @SHARED_MEM_CFG_CMD:
+ * response in &struct iwl_shared_mem_cfg or
+ * &struct iwl_shared_mem_cfg_v1
+ */
SHARED_MEM_CFG_CMD = 0x0,
INIT_EXTENDED_CFG_CMD = 0x03,
};
+/**
+ * enum iwl_data_path_subcmd_ids - data path group commands
+ */
enum iwl_data_path_subcmd_ids {
+ /**
+ * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd
+ */
DQA_ENABLE_CMD = 0x0,
UPDATE_MU_GROUPS_CMD = 0x1,
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
@@ -345,16 +551,54 @@ enum iwl_prot_offload_subcmd_ids {
enum iwl_regulatory_and_nvm_subcmd_ids {
NVM_ACCESS_COMPLETE = 0x0,
+ NVM_GET_INFO = 0x2,
};
+/**
+ * enum iwl_debug_cmds - debug commands
+ */
enum iwl_debug_cmds {
+ /**
+ * @LMAC_RD_WR:
+ * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
+ * &struct iwl_dbg_mem_access_rsp
+ */
LMAC_RD_WR = 0x0,
+ /**
+ * @UMAC_RD_WR:
+ * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
+ * &struct iwl_dbg_mem_access_rsp
+ */
UMAC_RD_WR = 0x1,
+ /**
+ * @MFU_ASSERT_DUMP_NTF:
+ * &struct iwl_mfu_assert_dump_notif
+ */
MFU_ASSERT_DUMP_NTF = 0xFE,
};
-/* command groups */
-enum {
+/**
+ * enum iwl_mvm_command_groups - command groups for the firmware
+ * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds
+ * @LONG_GROUP: legacy group with long header, also uses command IDs
+ * from &enum iwl_legacy_cmds
+ * @SYSTEM_GROUP: system group, uses command IDs from
+ * &enum iwl_system_subcmd_ids
+ * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from
+ * &enum iwl_mac_conf_subcmd_ids
+ * @PHY_OPS_GROUP: PHY operations group, uses command IDs from
+ * &enum iwl_phy_ops_subcmd_ids
+ * @DATA_PATH_GROUP: data path group, uses command IDs from
+ * &enum iwl_data_path_subcmd_ids
+ * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
+ * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids
+ * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from
+ * &enum iwl_prot_offload_subcmd_ids
+ * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
+ * &enum iwl_regulatory_and_nvm_subcmd_ids
+ * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds
+ */
+enum iwl_mvm_command_groups {
LEGACY_GROUP = 0x0,
LONG_GROUP = 0x1,
SYSTEM_GROUP = 0x2,
@@ -390,13 +634,13 @@ struct iwl_tx_ant_cfg_cmd {
__le32 valid;
} __packed;
-/*
- * Calibration control struct.
+/**
+ * struct iwl_calib_ctrl - Calibration control struct.
* Sent as part of the phy configuration command.
* @flow_trigger: bitmap for which calibrations to perform according to
- * flow triggers.
+ * flow triggers, using &enum iwl_calib_cfg
* @event_trigger: bitmap for which calibrations to perform according to
- * event triggers.
+ * event triggers, using &enum iwl_calib_cfg
*/
struct iwl_calib_ctrl {
__le32 flow_trigger;
@@ -428,8 +672,10 @@ enum iwl_calib_cfg {
IWL_CALIB_CFG_AGC_IDX = BIT(18),
};
-/*
- * Phy configuration command.
+/**
+ * struct iwl_phy_cfg_cmd - Phy configuration command
+ * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
+ * @calib_control: calibration control data
*/
struct iwl_phy_cfg_cmd {
__le32 phy_cfg;
@@ -448,15 +694,39 @@ struct iwl_phy_cfg_cmd {
#define PHY_CFG_RX_CHAIN_C BIT(14)
-/* Target of the NVM_ACCESS_CMD */
-enum {
+/**
+ * enum iwl_nvm_access_op - NVM access opcode
+ * @IWL_NVM_READ: read NVM
+ * @IWL_NVM_WRITE: write NVM
+ */
+enum iwl_nvm_access_op {
+ IWL_NVM_READ = 0,
+ IWL_NVM_WRITE = 1,
+};
+
+/**
+ * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD
+ * @NVM_ACCESS_TARGET_CACHE: access the cache
+ * @NVM_ACCESS_TARGET_OTP: access the OTP
+ * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM
+ */
+enum iwl_nvm_access_target {
NVM_ACCESS_TARGET_CACHE = 0,
NVM_ACCESS_TARGET_OTP = 1,
NVM_ACCESS_TARGET_EEPROM = 2,
};
-/* Section types for NVM_ACCESS_CMD */
-enum {
+/**
+ * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD
+ * @NVM_SECTION_TYPE_SW: software section
+ * @NVM_SECTION_TYPE_REGULATORY: regulatory section
+ * @NVM_SECTION_TYPE_CALIBRATION: calibration section
+ * @NVM_SECTION_TYPE_PRODUCTION: production section
+ * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
+ * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
+ * @NVM_MAX_NUM_SECTIONS: number of sections
+ */
+enum iwl_nvm_section_type {
NVM_SECTION_TYPE_SW = 1,
NVM_SECTION_TYPE_REGULATORY = 3,
NVM_SECTION_TYPE_CALIBRATION = 4,
@@ -467,10 +737,10 @@ enum {
};
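As a hedged sketch of how the opcode, target and section-type enums above
combine in a request (illustration only; the field widths are assumed from
the driver's use of this command, u8 for op_code/target and __le16 for the
rest):

	struct iwl_nvm_access_cmd nvm_cmd = {
		.op_code = IWL_NVM_READ,
		.target = NVM_ACCESS_TARGET_CACHE,
		.type = cpu_to_le16(NVM_SECTION_TYPE_SW),
		.offset = cpu_to_le16(0),
		/* read a 64-byte chunk from the start of the section */
		.length = cpu_to_le16(64),
	};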
/**
- * struct iwl_nvm_access_cmd_ver2 - Request the device to send an NVM section
- * @op_code: 0 - read, 1 - write
- * @target: NVM_ACCESS_TARGET_*
- * @type: NVM_SECTION_TYPE_*
+ * struct iwl_nvm_access_cmd - Request the device to send an NVM section
+ * @op_code: &enum iwl_nvm_access_op
+ * @target: &enum iwl_nvm_access_target
+ * @type: &enum iwl_nvm_section_type
* @offset: offset in bytes into the section
* @length: in bytes, to read/write
* @data: for a write operation, the data to write; on read it's empty
@@ -486,7 +756,26 @@ struct iwl_nvm_access_cmd {
#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
-/*
+/**
+ * struct iwl_fw_paging_cmd_v1 - paging layout
+ *
+ * (FW_PAGING_BLOCK_CMD = 0x4f)
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size in powers of 2
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+ */
+struct iwl_fw_paging_cmd_v1 {
+ __le32 flags;
+ __le32 block_size;
+ __le32 block_num;
+ __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+
+/**
* struct iwl_fw_paging_cmd - paging layout
*
* (FW_PAGING_BLOCK_CMD = 0x4f)
@@ -497,16 +786,12 @@ struct iwl_nvm_access_cmd {
* @block_size: the block size in powers of 2
* @block_num: number of blocks specified in the command.
* @device_phy_addr: virtual addresses from device side
- * 32 bit address for API version 1, 64 bit address for API version 2.
-*/
+ */
struct iwl_fw_paging_cmd {
__le32 flags;
__le32 block_size;
__le32 block_num;
- union {
- __le32 addr32[NUM_OF_FW_PAGING_BLOCKS];
- __le64 addr64[NUM_OF_FW_PAGING_BLOCKS];
- } device_phy_addr;
+ __le64 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_2 */
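Splitting the two layouts also makes the size relation directly checkable;
an illustrative compile-time check (not in the patch), since v2 widens each
per-block address from __le32 to __le64:

	_Static_assert(sizeof(struct iwl_fw_paging_cmd) ==
		       sizeof(struct iwl_fw_paging_cmd_v1) +
		       NUM_OF_FW_PAGING_BLOCKS * sizeof(__le32),
		       "v2 adds 32 bits per block address");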
/*
@@ -679,12 +964,21 @@ struct iwl_error_resp {
#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
-/* Used to extract ID and color from the context dword */
-#define FW_CTXT_ID_POS (0)
-#define FW_CTXT_ID_MSK (0xff << FW_CTXT_ID_POS)
-#define FW_CTXT_COLOR_POS (8)
-#define FW_CTXT_COLOR_MSK (0xff << FW_CTXT_COLOR_POS)
-#define FW_CTXT_INVALID (0xffffffff)
+/**
+ * enum iwl_mvm_id_and_color - ID and color fields in context dword
+ * @FW_CTXT_ID_POS: position of the ID
+ * @FW_CTXT_ID_MSK: mask of the ID
+ * @FW_CTXT_COLOR_POS: position of the color
+ * @FW_CTXT_COLOR_MSK: mask of the color
+ * @FW_CTXT_INVALID: value used to indicate unused/invalid
+ */
+enum iwl_mvm_id_and_color {
+ FW_CTXT_ID_POS = 0,
+ FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS,
+ FW_CTXT_COLOR_POS = 8,
+ FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS,
+ FW_CTXT_INVALID = 0xffffffff,
+};
#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\
(_color << FW_CTXT_COLOR_POS))
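To illustrate the packing (a sketch only; the extraction helper below is
hypothetical, the macro is the driver's own):

	/* FW_CMD_ID_AND_COLOR(5, 2) == 0x205: ID in bits 0-7, color in 8-15 */
	static inline u32 iwl_mvm_ctxt_color(u32 id_and_color)
	{
		return (id_and_color & FW_CTXT_COLOR_MSK) >> FW_CTXT_COLOR_POS;
	}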
@@ -832,7 +1126,8 @@ enum {
#define TE_V2_PLACEMENT_POS 12
#define TE_V2_ABSENCE_POS 15
-/* Time event policy values
+/**
+ * enum iwl_time_event_policy - Time event policy values
* A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
@@ -847,12 +1142,13 @@ enum {
* @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
* @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
* @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ * @T2_V2_START_IMMEDIATELY: start time event immediately
* @TE_V2_DEP_OTHER: depends on another time event
* @TE_V2_DEP_TSF: depends on a specific time
* @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
* @TE_V2_ABSENCE: are we present or absent during the Time Event.
*/
-enum {
+enum iwl_time_event_policy {
TE_V2_DEFAULT_POLICY = 0x0,
/* notifications (event start/stop, fragment start/stop) */
@@ -867,8 +1163,6 @@ enum {
TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
T2_V2_START_IMMEDIATELY = BIT(11),
- TE_V2_NOTIF_MSK = 0xff,
-
/* placement characteristics */
TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
@@ -879,12 +1173,13 @@ enum {
};
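A hedged sketch of composing these policy bits when building a time event
(this assumes the host event start/end notification bits elided from this
hunk, and that the command's policy field is a little-endian 16-bit value
as in the driver):

	struct iwl_time_event_cmd time_cmd = {};

	/* notify on event start/end and schedule immediately */
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);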
/**
- * struct iwl_time_event_cmd_api - configuring Time Events
+ * struct iwl_time_event_cmd - configuring Time Events
* with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
* version 1; which one is used is determined by IWL_UCODE_TLV_FLAGS)
* ( TIME_EVENT_CMD = 0x29 )
- * @id_and_color: ID and color of the relevant MAC
- * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @id_and_color: ID and color of the relevant MAC,
+ * &enum iwl_mvm_id_and_color
+ * @action: action to perform, one of &enum iwl_phy_ctxt_action
* @id: this field has two meanings, depending on the action:
* If the action is ADD, then it means the type of event to add.
* For all other actions it is the unique event ID assigned when the
@@ -900,7 +1195,8 @@ enum {
* on event and/or fragment start and/or end
* using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
* TE_EVENT_SOCIOPATHIC
- * using TE_ABSENCE and using TE_NOTIF_*
+ * using TE_ABSENCE and using TE_NOTIF_*,
+ * &enum iwl_time_event_policy
*/
struct iwl_time_event_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
@@ -923,7 +1219,8 @@ struct iwl_time_event_cmd {
* @status: bit 0 indicates success, all others specify errors
* @id: the Time Event type
* @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
- * @id_and_color: ID and color of the relevant MAC
+ * @id_and_color: ID and color of the relevant MAC,
+ * &enum iwl_mvm_id_and_color
*/
struct iwl_time_event_resp {
__le32 status;
@@ -939,7 +1236,7 @@ struct iwl_time_event_resp {
* @session_id: session's unique id
* @unique_id: unique id of the Time Event itself
* @id_and_color: ID and color of the relevant MAC
- * @action: one of TE_NOTIF_START or TE_NOTIF_END
+ * @action: &enum iwl_time_event_policy
* @status: true if scheduled, false otherwise (not executed)
*/
struct iwl_time_event_notif {
@@ -955,12 +1252,35 @@ struct iwl_time_event_notif {
/* Bindings and Time Quota */
/**
+ * struct iwl_binding_cmd_v1 - configuring bindings
+ * ( BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding,
+ * &enum iwl_mvm_id_and_color
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding,
+ * &enum iwl_mvm_id_and_color
+ * @phy: PHY id and color which belongs to the binding,
+ * &enum iwl_mvm_id_and_color
+ */
+struct iwl_binding_cmd_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* BINDING_DATA_API_S_VER_1 */
+ __le32 macs[MAX_MACS_IN_BINDING];
+ __le32 phy;
+} __packed; /* BINDING_CMD_API_S_VER_1 */
+
+/**
* struct iwl_binding_cmd - configuring bindings
* ( BINDING_CONTEXT_CMD = 0x2b )
- * @id_and_color: ID and color of the relevant Binding
+ * @id_and_color: ID and color of the relevant Binding,
+ * &enum iwl_mvm_id_and_color
* @action: action to perform, one of FW_CTXT_ACTION_*
* @macs: array of MAC id and colors which belong to the binding
+ * &enum iwl_mvm_id_and_color
* @phy: PHY id and color which belongs to the binding
+ * &enum iwl_mvm_id_and_color
* @lmac_id: the lmac id the binding belongs to
*/
struct iwl_binding_cmd {
@@ -970,11 +1290,10 @@ struct iwl_binding_cmd {
/* BINDING_DATA_API_S_VER_1 */
__le32 macs[MAX_MACS_IN_BINDING];
__le32 phy;
- /* BINDING_CMD_API_S_VER_1 */
__le32 lmac_id;
} __packed; /* BINDING_CMD_API_S_VER_2 */
-#define IWL_BINDING_CMD_SIZE_V1 offsetof(struct iwl_binding_cmd, lmac_id)
+#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1)
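With the v1 layout split out, a caller picks the payload size by firmware
capability; a sketch, assuming the CDB predicate from mvm.h decides which
layout the firmware expects:

	struct iwl_binding_cmd cmd = {};
	u32 status;
	int ret;
	u32 size = iwl_mvm_is_cdb_supported(mvm) ? sizeof(cmd)
						 : IWL_BINDING_CMD_SIZE_V1;

	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
					  size, &cmd, &status);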
#define IWL_LMAC_24G_INDEX 0
#define IWL_LMAC_5G_INDEX 1
@@ -983,7 +1302,8 @@ struct iwl_binding_cmd {
/**
* struct iwl_time_quota_data - configuration of time quota per binding
- * @id_and_color: ID and color of the relevant Binding
+ * @id_and_color: ID and color of the relevant Binding,
+ * &enum iwl_mvm_id_and_color
* @quota: absolute time quota in TU. The scheduler will try to divide the
* remaining quota (after Time Events) according to this quota.
* @max_duration: max uninterrupted context duration in TU
@@ -1539,8 +1859,8 @@ enum iwl_sf_scenario {
#define SF_CFG_DUMMY_NOTIF_OFF BIT(16)
/**
- * Smart Fifo configuration command.
- * @state: smart fifo state, types listed in enum %iwl_sf_sate.
+ * struct iwl_sf_cfg_cmd - Smart Fifo configuration command.
+ * @state: smart fifo state, types listed in &enum iwl_sf_state.
* @watermark: Minimum allowed available free space in RXF for transient state.
* @long_delay_timeouts: aging and idle timer values for each scenario
* in long delay state.
@@ -1590,11 +1910,11 @@ struct iwl_mcc_update_cmd {
u8 source_id;
u8 reserved;
__le32 key;
- __le32 reserved2[5];
+ u8 reserved2[20];
} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
/**
- * iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
+ * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
* Contains the new channel control profile map, if changed, and the new MCC
* (mobile country code).
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
@@ -1617,7 +1937,7 @@ struct iwl_mcc_update_resp_v1 {
} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
/**
- * iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
+ * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
* Contains the new channel control profile map, if changed, and the new MCC
* (mobile country code).
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
@@ -1659,7 +1979,7 @@ struct iwl_mcc_update_resp {
* @reserved1: reserved for alignment
*/
struct iwl_mcc_chub_notif {
- u16 mcc;
+ __le16 mcc;
u8 source_id;
u8 reserved1;
} __packed; /* LAR_MCC_NOTIFY_S */
@@ -1699,10 +2019,10 @@ enum iwl_dts_measurement_flags {
};
/**
- * iwl_dts_measurement_cmd - request DTS temperature and/or voltage measurements
+ * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements
*
- * @flags: indicates which measurements we want as specified in &enum
- * iwl_dts_measurement_flags
+ * @flags: indicates which measurements we want as specified in
+ * &enum iwl_dts_measurement_flags
*/
struct iwl_dts_measurement_cmd {
__le32 flags;
@@ -1754,7 +2074,7 @@ enum iwl_dts_bit_mode {
};
/**
- * iwl_ext_dts_measurement_cmd - request extended DTS temperature measurements
+ * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements
* @control_mode: see &enum iwl_dts_control_measurement_mode
* @temperature: used when overwrite DTS mode is selected
* @sensor: set temperature sensor to use. See &enum iwl_dts_used
@@ -1810,7 +2130,7 @@ struct ct_kill_notif {
* enum ctdp_cmd_operation - CTDP command operations
* @CTDP_CMD_OPERATION_START: update the current budget
* @CTDP_CMD_OPERATION_STOP: stop ctdp
-* @CTDP_CMD_OPERATION_REPORT: get the avgerage budget
+* @CTDP_CMD_OPERATION_REPORT: get the average budget
*/
enum iwl_mvm_ctdp_cmd_operation {
CTDP_CMD_OPERATION_START = 0x1,
@@ -1834,7 +2154,7 @@ struct iwl_mvm_ctdp_cmd {
#define IWL_MAX_DTS_TRIPS 8
/**
- * struct iwl_temp_report_ths_cmd - set temperature thresholds
+ * struct temp_report_ths_cmd - set temperature thresholds
*
* @num_temps: number of temperature thresholds passed
* @thresholds: array with the thresholds to be configured
@@ -1856,7 +2176,7 @@ enum iwl_tdls_channel_switch_type {
}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
/**
- * Switch timing sub-element in a TDLS channel-switch command
+ * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch
* @frame_timestamp: GP2 timestamp of channel-switch request/response packet
* received from peer
* @max_offchan_duration: What amount of microseconds out of a DTIM is given
@@ -1876,7 +2196,7 @@ struct iwl_tdls_channel_switch_timing {
#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
/**
- * TDLS channel switch frame template
+ * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template
*
* A template representing a TDLS channel-switch request or response frame
*
@@ -1891,7 +2211,7 @@ struct iwl_tdls_channel_switch_frame {
} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
/**
- * TDLS channel switch command
+ * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
*
* The command is sent to initiate a channel switch and also in response to
* incoming TDLS channel-switch request/response packets from remote peers.
@@ -1911,7 +2231,7 @@ struct iwl_tdls_channel_switch_cmd {
} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
/**
- * TDLS channel switch start notification
+ * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification
*
* @status: non-zero on success
* @offchannel_duration: duration given in microseconds
@@ -1924,7 +2244,7 @@ struct iwl_tdls_channel_switch_notif {
} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
/**
- * TDLS station info
+ * struct iwl_tdls_sta_info - TDLS station info
*
* @sta_id: station id of the TDLS peer
* @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
@@ -1939,7 +2259,7 @@ struct iwl_tdls_sta_info {
} __packed; /* TDLS_STA_INFO_VER_1 */
/**
- * TDLS basic config command
+ * struct iwl_tdls_config_cmd - TDLS basic config command
*
* @id_and_color: MAC id and color being configured
* @tdls_peer_count: amount of currently connected TDLS peers
@@ -1963,7 +2283,7 @@ struct iwl_tdls_config_cmd {
} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
/**
- * TDLS per-station config information from FW
+ * struct iwl_tdls_config_sta_info_res - TDLS per-station config information
*
* @sta_id: station id of the TDLS peer
* @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to
@@ -1975,7 +2295,7 @@ struct iwl_tdls_config_sta_info_res {
} __packed; /* TDLS_STA_INFO_RSP_VER_1 */
/**
- * TDLS config information from FW
+ * struct iwl_tdls_config_res - TDLS config information from FW
*
* @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP
* @sta_info: per-station TDLS config information
@@ -1991,7 +2311,7 @@ struct iwl_tdls_config_res {
#define TX_FIFO_INTERNAL_MAX_NUM 6
/**
- * Shared memory configuration information from the FW
+ * struct iwl_shared_mem_cfg_v1 - Shared memory configuration information
*
* @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
* accessible)
@@ -2045,7 +2365,7 @@ struct iwl_shared_mem_lmac_cfg {
} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
/**
- * Shared memory configuration information from the FW
+ * struct iwl_shared_mem_cfg - Shared memory configuration information
*
* @shared_mem_addr: shared memory address
* @shared_mem_size: shared memory size
@@ -2073,7 +2393,7 @@ struct iwl_shared_mem_cfg {
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
/**
- * VHT MU-MIMO group configuration
+ * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration
*
* @membership_status: a bitmap of MU groups
* @user_position: the position of the station in a group. If the station is in the
@@ -2100,7 +2420,7 @@ struct iwl_mu_group_mgmt_notif {
#define MAX_STORED_BEACON_SIZE 600
/**
- * Stored beacon notification
+ * struct iwl_stored_beacon_notif - Stored beacon notification
*
* @system_time: system time on air rise
* @tsf: TSF on air rise
@@ -2135,7 +2455,7 @@ enum iwl_lqm_status {
};
/**
- * Link Quality Measurement command
+ * struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command
* @cmd_operation: command operation to be performed (start or stop)
* as defined above.
* @mac_id: MAC ID the measurement applies to.
@@ -2150,7 +2470,7 @@ struct iwl_link_qual_msrmnt_cmd {
} __packed /* LQM_CMD_API_S_VER_1 */;
/**
- * Link Quality Measurement notification
+ * struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification
*
* @frequent_stations_air_time: an array containing the total air time
* (in uSec) used by the most frequently transmitting stations.
@@ -2174,11 +2494,11 @@ struct iwl_link_qual_msrmnt_notif {
__le32 tx_frame_dropped;
__le32 mac_id;
__le32 status;
- __le32 reserved[3];
+ u8 reserved[12];
} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
/**
- * Channel switch NOA notification
+ * struct iwl_channel_switch_noa_notif - Channel switch NOA notification
*
* @id_and_color: ID and color of the MAC
*/
@@ -2259,4 +2579,88 @@ struct iwl_init_extended_cfg_cmd {
__le32 init_flags;
} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
+/**
+ * struct iwl_nvm_get_info - request to get NVM data
+ * @reserved: reserved
+ */
+struct iwl_nvm_get_info {
+ __le32 reserved;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
+
+/**
+ * struct iwl_nvm_get_info_general - general NVM data
+ * @flags: 1 - empty, 0 - valid
+ * @nvm_version: nvm version
+ * @board_type: board type
+ */
+struct iwl_nvm_get_info_general {
+ __le32 flags;
+ __le16 nvm_version;
+ u8 board_type;
+ u8 reserved;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
+
+/**
+ * struct iwl_nvm_get_info_sku - MAC information
+ * @enable_24g: band 2.4G enabled
+ * @enable_5g: band 5G enabled
+ * @enable_11n: 11n enabled
+ * @enable_11ac: 11ac enabled
+ * @mimo_disable: MIMO disabled
+ * @ext_crypto: Extended crypto enabled
+ */
+struct iwl_nvm_get_info_sku {
+ __le32 enable_24g;
+ __le32 enable_5g;
+ __le32 enable_11n;
+ __le32 enable_11ac;
+ __le32 mimo_disable;
+ __le32 ext_crypto;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */
+
+/**
+ * struct iwl_nvm_get_info_phy - phy information
+ * @tx_chains: BIT 0 chain A, BIT 1 chain B
+ * @rx_chains: BIT 0 chain A, BIT 1 chain B
+ */
+struct iwl_nvm_get_info_phy {
+ __le32 tx_chains;
+ __le32 rx_chains;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+
+#define IWL_NUM_CHANNELS (51)
+
+/**
+ * struct iwl_nvm_get_info_regulatory - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @channel_profile: regulatory data of this channel, see
+ * &enum iwl_nvm_channel_flags for the data
+ * @reserved: reserved
+ */
+struct iwl_nvm_get_info_regulatory {
+ __le32 lar_enabled;
+ __le16 channel_profile[IWL_NUM_CHANNELS];
+ __le16 reserved;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+
+/**
+ * struct iwl_nvm_get_info_rsp - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwl_nvm_get_info_rsp {
+ struct iwl_nvm_get_info_general general;
+ struct iwl_nvm_get_info_sku mac_sku;
+ struct iwl_nvm_get_info_phy phy_sku;
+ struct iwl_nvm_get_info_regulatory regulatory;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */
+
+/**
+ * struct iwl_mvm_antenna_coupling_notif - antenna coupling notification
+ * @isolation: antenna isolation value
+ */
+struct iwl_mvm_antenna_coupling_notif {
+ __le32 isolation;
+} __packed;
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index c8712e6eea74..1602b360353c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -319,10 +319,8 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
{
- if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert)
- return;
-
- kfree(mvm->fw_dump_desc);
+ if (mvm->fw_dump_desc != &iwl_mvm_dump_desc_assert)
+ kfree(mvm->fw_dump_desc);
mvm->fw_dump_desc = NULL;
}
@@ -640,18 +638,21 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
/* Make room for PRPH registers */
- for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) {
- /* The range includes both boundaries */
- int num_bytes_in_chunk =
- iwl_prph_dump_addr_comm[i].end -
- iwl_prph_dump_addr_comm[i].start + 4;
-
- prph_len += sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_prph) +
- num_bytes_in_chunk;
+ if (!mvm->trans->cfg->gen2) {
+ for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
+ i++) {
+ /* The range includes both boundaries */
+ int num_bytes_in_chunk =
+ iwl_prph_dump_addr_comm[i].end -
+ iwl_prph_dump_addr_comm[i].start + 4;
+
+ prph_len += sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_prph) +
+ num_bytes_in_chunk;
+ }
}
- if (mvm->cfg->mq_rx_supported) {
+ if (!mvm->trans->cfg->gen2 && mvm->cfg->mq_rx_supported) {
for (i = 0; i <
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
/* The range includes both boundaries */
@@ -691,7 +692,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
/* Make room for fw's virtual image pages, if it exists */
- if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+ if (!mvm->trans->cfg->gen2 &&
+ mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
mvm->fw_paging_db[0].fw_paging_block)
file_len += mvm->num_of_paging_blk *
(sizeof(*dump_data) +
@@ -704,14 +706,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sizeof(*dump_info);
}
- /*
- * In 8000 HW family B-step include the ICCM (which resides separately)
- */
- if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
- CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
- IWL8260_ICCM_LEN;
-
if (mvm->fw_dump_desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
mvm->fw_dump_desc->len;
@@ -836,21 +830,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
- CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
- sizeof(*dump_mem));
- dump_mem = (void *)dump_data->data;
- dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
- dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
- iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
- dump_mem->data, IWL8260_ICCM_LEN);
- dump_data = iwl_fw_error_next_data(dump_data);
- }
-
/* Dump fw's virtual image */
- if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+ if (!mvm->trans->cfg->gen2 &&
+ mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
mvm->fw_paging_db[0].fw_paging_block) {
for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
struct iwl_fw_error_dump_paging *paging;
@@ -931,6 +913,10 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
if (trigger)
delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+ if (WARN(mvm->trans->state == IWL_TRANS_NO_FW,
+ "Can't collect dbg data when FW isn't alive\n"))
+ return -EIO;
+
if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
return -EBUSY;
@@ -943,7 +929,7 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
mvm->fw_dump_desc = desc;
mvm->fw_dump_trig = trigger;
- queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
+ schedule_delayed_work(&mvm->fw_dump_wk, delay);
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index e6c9528eeeda..24cc406d87ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -384,20 +384,23 @@ static int iwl_save_fw_paging(struct iwl_mvm *mvm,
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
- struct iwl_fw_paging_cmd paging_cmd = {
- .flags =
+ union {
+ struct iwl_fw_paging_cmd v2;
+ struct iwl_fw_paging_cmd_v1 v1;
+ } paging_cmd = {
+ .v2.flags =
cpu_to_le32(PAGING_CMD_IS_SECURED |
PAGING_CMD_IS_ENABLED |
(mvm->num_of_pages_in_last_blk <<
PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
- .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
- .block_num = cpu_to_le32(mvm->num_of_paging_blk),
+ .v2.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+ .v2.block_num = cpu_to_le32(mvm->num_of_paging_blk),
};
- int blk_idx, size = sizeof(paging_cmd);
+ int blk_idx, size = sizeof(paging_cmd.v2);
/* A bit hard coded - but this is the old API and will be deprecated */
if (!iwl_mvm_has_new_tx_api(mvm))
- size -= NUM_OF_FW_PAGING_BLOCKS * 4;
+ size = sizeof(paging_cmd.v1);
/* loop over all paging blocks + CSS block */
for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
@@ -408,11 +411,11 @@ static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
if (iwl_mvm_has_new_tx_api(mvm)) {
__le64 phy_addr = cpu_to_le64(addr);
- paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
+ paging_cmd.v2.device_phy_addr[blk_idx] = phy_addr;
} else {
__le32 phy_addr = cpu_to_le32(addr);
- paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
+ paging_cmd.v1.device_phy_addr[blk_idx] = phy_addr;
}
}
@@ -619,7 +622,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (WARN_ON(!fw))
return -EINVAL;
mvm->cur_ucode = ucode_type;
- mvm->ucode_loaded = false;
+ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
alive_cmd, ARRAY_SIZE(alive_cmd),
@@ -641,12 +644,12 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
- if (trans->cfg->gen2)
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
- else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, SB_CPU_1_STATUS),
@@ -693,7 +696,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
- mvm->ucode_loaded = true;
+ set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
return 0;
}
@@ -738,9 +741,13 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
goto error;
}
- /* Read the NVM only at driver load time, no need to do this twice */
- if (read_nvm) {
- /* Read nvm */
+ /* Load NVM to NIC if needed */
+ if (mvm->nvm_file_name) {
+ iwl_mvm_read_external_nvm(mvm);
+ iwl_mvm_load_nvm_to_nic(mvm);
+ }
+
+ if (IWL_MVM_PARSE_NVM && read_nvm) {
ret = iwl_nvm_init(mvm, true);
if (ret) {
IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
@@ -748,14 +755,6 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
}
}
- /* In case we read the NVM from external file, load it to the NIC */
- if (mvm->nvm_file_name)
- iwl_mvm_load_nvm_to_nic(mvm);
-
- ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
- if (WARN_ON(ret))
- goto error;
-
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
NVM_ACCESS_COMPLETE), 0,
sizeof(nvm_complete), &nvm_complete);
@@ -766,8 +765,21 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
}
/* We wait for the INIT complete notification */
- return iwl_wait_notification(&mvm->notif_wait, &init_wait,
- MVM_UCODE_ALIVE_TIMEOUT);
+ ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Read the NVM only at driver load time, no need to do this twice */
+ if (!IWL_MVM_PARSE_NVM && read_nvm) {
+ ret = iwl_mvm_nvm_get_from_fw(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
error:
iwl_remove_notification(&mvm->notif_wait, &init_wait);
@@ -1627,7 +1639,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
- iwl_mvm_stop_device(mvm);
+ if (!iwlmvm_mod_params.init_dbg)
+ iwl_mvm_stop_device(mvm);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
index 1e51fbe95f7c..3cac4278a5fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -123,14 +123,17 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
return ret;
}
+ mvm->init_status |= IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
return 0;
}
void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
{
- if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE)
+ if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE ||
+ !(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE))
return;
led_classdev_unregister(&mvm->led);
kfree(mvm->led.name);
+ mvm->init_status &= ~IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 15d13017c1df..71f23ac608eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -846,6 +846,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs);
ctxt_sta->dtim_time =
cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs);
+ ctxt_sta->assoc_beacon_arrive_time =
+ cpu_to_le32(vif->bss_conf.sync_device_ts);
IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n",
le64_to_cpu(ctxt_sta->dtim_tsf),
@@ -1457,6 +1459,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
beacon_notify_hdr = &beacon->beacon_notify_hdr;
mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
+ mvm->ibss_manager = beacon->ibss_mgr_status != 0;
agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
@@ -1632,9 +1635,9 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");
- queue_delayed_work(system_wq, &mvm->cs_tx_unblock_dwork,
- msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
- csa_vif->bss_conf.beacon_int));
+ schedule_delayed_work(&mvm->cs_tx_unblock_dwork,
+ msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
+ csa_vif->bss_conf.beacon_int));
ieee80211_csa_finish(csa_vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a67aa1f5a51c..d04a88c6c593 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -735,6 +735,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ret = ieee80211_register_hw(mvm->hw);
if (ret)
iwl_mvm_leds_exit(mvm);
+ mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
if (mvm->cfg->vht_mu_mimo_supported)
wiphy_ext_feature_set(hw->wiphy,
@@ -1243,6 +1244,17 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk);
flush_work(&mvm->add_stream_wk);
+
+ /*
+ * Lock and clear the firmware running bit here already, so that
+ * new commands coming in elsewhere, e.g. from debugfs, will not
+ * be able to proceed. This is important here because one of those
+ * debugfs files causes the fw_dump_wk to be triggered, and if we
+ * don't stop debugfs accesses before canceling that it could be
+ * retriggered after we flush it but before we've cleared the bit.
+ */
+ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
cancel_delayed_work_sync(&mvm->fw_dump_wk);
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
@@ -1451,7 +1463,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
{
u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
- if (tfd_msk) {
+ if (tfd_msk && !iwl_mvm_is_dqa_supported(mvm)) {
/*
* mac80211 first removes all the stations of the vif and
* then removes the vif. When it removes a station it also
@@ -1460,6 +1472,8 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
* of these AMPDU sessions are properly closed.
* We still need to take care of the shared queues of the vif.
* Flush them here.
+ * For DQA mode there is no need - broadcast and multicast queues
+ * are flushed separately.
*/
mutex_lock(&mvm->mutex);
iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
@@ -1974,14 +1988,32 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
"Failed to update SF upon disassociation\n");
- /* remove AP station now that the MAC is unassoc */
- ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
- if (ret)
- IWL_ERR(mvm, "failed to remove AP station\n");
+ /*
+ * If we get an assert during the connection (after the
+ * station has been added, but before the vif is set
+ * to associated), mac80211 will re-add the station and
+ * then configure the vif. Since the vif is not
+ * associated, we would remove the station here and
+ * this would fail the recovery.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status)) {
+ /*
+ * Remove AP station now that
+ * the MAC is unassoc
+ */
+ ret = iwl_mvm_rm_sta_id(mvm, vif,
+ mvmvif->ap_sta_id);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to remove AP station\n");
+
+ if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+ mvm->d0i3_ap_sta_id =
+ IWL_MVM_INVALID_STA;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+ }
- if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
- mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
- mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
@@ -2381,7 +2413,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
__set_bit(tid_data->txq_id, &txqs);
- if (iwl_mvm_tid_queued(tid_data) == 0)
+ if (iwl_mvm_tid_queued(mvm, tid_data) == 0)
continue;
__set_bit(tid, &tids);
@@ -2869,7 +2901,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
@@ -2915,8 +2948,13 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
ret = -EOPNOTSUPP;
else
ret = 0;
- key->hw_key_idx = STA_KEY_IDX_INVALID;
- break;
+
+ if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
+ key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
+ !iwl_mvm_has_new_tx_api(mvm)) {
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+ break;
+ }
}
/* During FW restart, in order to restore the state as it was,
@@ -3717,6 +3755,13 @@ static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
return ret;
}
+static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ return mvm->ibss_manager;
+}
+
static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
bool set)
@@ -3988,21 +4033,23 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
/* make sure only TDLS peers or the AP are flushed */
WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
- msk |= mvmsta->tfd_queue_msk;
+ if (drop) {
+ if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
+ IWL_ERR(mvm, "flush request fail\n");
+ } else {
+ msk |= mvmsta->tfd_queue_msk;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
+ }
}
- if (drop) {
- if (iwl_mvm_flush_tx_path(mvm, msk, 0))
- IWL_ERR(mvm, "flush request fail\n");
- mutex_unlock(&mvm->mutex);
- } else {
- mutex_unlock(&mvm->mutex);
+ mutex_unlock(&mvm->mutex);
- /* this can take a while, and we may need/want other operations
- * to succeed while doing this, so do it without the mutex held
- */
+ /* this can take a while, and we may need/want other operations
+ * to succeed while doing this, so do it without the mutex held
+ */
+ if (!drop && !iwl_mvm_has_new_tx_api(mvm))
iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
- }
}
static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
@@ -4023,7 +4070,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
mutex_lock(&mvm->mutex);
- if (mvm->ucode_loaded) {
+ if (iwl_mvm_firmware_running(mvm)) {
ret = iwl_mvm_request_statistics(mvm, false);
if (ret)
goto out;
@@ -4248,11 +4295,13 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
goto out;
}
- if (notif->sync)
+ if (notif->sync) {
ret = wait_event_timeout(mvm->rx_sync_waitq,
- atomic_read(&mvm->queue_sync_counter) == 0,
+ atomic_read(&mvm->queue_sync_counter) == 0 ||
+ iwl_mvm_is_radio_killed(mvm),
HZ);
- WARN_ON_ONCE(!ret);
+ WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm));
+ }
out:
atomic_set(&mvm->queue_sync_counter, 0);
@@ -4316,6 +4365,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.join_ibss = iwl_mvm_start_ap_ibss,
.leave_ibss = iwl_mvm_stop_ap_ibss,
+ .tx_last_beacon = iwl_mvm_tx_last_beacon,
+
.set_tim = iwl_mvm_set_tim,
.channel_switch = iwl_mvm_channel_switch,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 52f8d7a6a7dc..3b1f15873034 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -754,6 +754,8 @@ struct iwl_mvm {
struct work_struct roc_done_wk;
+ unsigned long init_status;
+
unsigned long status;
u32 queue_sync_cookie;
@@ -765,7 +767,6 @@ struct iwl_mvm {
struct iwl_mvm_vif *bf_allowed_vif;
enum iwl_ucode_type cur_ucode;
- bool ucode_loaded;
bool hw_registered;
bool calibrating;
u32 error_event_table[2];
@@ -1020,6 +1021,9 @@ struct iwl_mvm {
/* system time of last beacon (for AP/GO interface) */
u32 ap_last_beacon_gp2;
+ /* indicates that we transmitted the last beacon */
+ bool ibss_manager;
+
bool lar_regdom_set;
enum iwl_mcc_source mcc_src;
@@ -1077,6 +1081,18 @@ struct iwl_mvm {
#define IWL_MAC80211_GET_MVM(_hw) \
IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
+/**
+ * enum iwl_mvm_status - MVM status bits
+ * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
+ * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
+ * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
+ * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
+ * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
+ * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
+ * @IWL_MVM_STATUS_DUMPING_FW_LOG: FW log is being dumped
+ * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
+ */
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
IWL_MVM_STATUS_HW_CTKILL,
@@ -1086,6 +1102,15 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_D3_RECONFIG,
IWL_MVM_STATUS_DUMPING_FW_LOG,
+ IWL_MVM_STATUS_FIRMWARE_RUNNING,
+};
+
+/* Keep track of completed init configuration */
+enum iwl_mvm_init_status {
+ IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
+ IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1),
+ IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE = BIT(2),
+ IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE = BIT(3),
};
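Each flag is set when its init step completes and tested in the matching
exit path, so teardown stays safe even if startup aborted early; a minimal
sketch of the pattern (mirroring the leds hunk above):

	if (!(mvm->init_status & IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE))
		return;
	/* ... undo the thermal init ... */
	mvm->init_status &= ~IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;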
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -1099,6 +1124,11 @@ static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
}
+static inline bool iwl_mvm_firmware_running(struct iwl_mvm *mvm)
+{
+ return test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+}
+
/* Must be called with rcu_read_lock() held and it can only be
* released when mvmsta is not needed anymore.
*/
@@ -1188,7 +1218,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
* Enable LAR only if it is supported by the FW (TLV) &&
* enabled in the NVM
*/
- if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ if (mvm->cfg->ext_nvm)
return nvm_lar && tlv_lar;
else
return tlv_lar;
@@ -1266,14 +1296,13 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
}
-static inline struct agg_tx_status*
-iwl_mvm_get_agg_status(struct iwl_mvm *mvm,
- struct iwl_mvm_tx_resp *tx_resp)
+static inline struct agg_tx_status *
+iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp)
{
if (iwl_mvm_has_new_tx_api(mvm))
- return &tx_resp->v6.status;
+ return &((struct iwl_mvm_tx_resp *)tx_resp)->status;
else
- return &tx_resp->v3.status;
+ return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status;
}
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
@@ -1355,6 +1384,10 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags);
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
+ u16 tids, u32 flags);
+
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
@@ -1381,7 +1414,9 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
/* NVM */
int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
+int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm);
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
+int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm);
static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
{
@@ -1711,7 +1746,7 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
}
/* hw scheduler queue config */
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
@@ -1755,7 +1790,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_free_fw_paging(mvm);
- mvm->ucode_loaded = false;
+ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
mvm->fw_dbg_conf = FW_DBG_INVALID;
iwl_trans_stop_device(mvm->trans);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 283c41df622c..dac7e542a190 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -77,7 +77,7 @@
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE 0x1b58
-#define IWL_MAX_NVM_8000_SECTION_SIZE 0x1ffc
+#define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc
#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0
@@ -118,10 +118,6 @@ static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
return ret;
pkt = cmd.resp_pkt;
- if (!pkt) {
- IWL_ERR(mvm, "Error in NVM_ACCESS response\n");
- return -EINVAL;
- }
/* Extract & check NVM write response */
nvm_resp = (void *)pkt->data;
if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
@@ -300,7 +296,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
bool lar_enabled;
/* Checking for required sections */
- if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!mvm->trans->cfg->ext_nvm) {
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
@@ -374,7 +370,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
*
* 4. save as "iNVM_xxx.bin" under /lib/firmware
*/
-static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
+int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
{
int ret, section_size;
u16 section_id;
@@ -391,19 +387,19 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
-#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
-#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
+#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
+#define EXT_NVM_WORD1_ID(x) ((x) >> 4)
#define NVM_HEADER_0 (0x2A504C54)
#define NVM_HEADER_1 (0x4E564D2A)
#define NVM_HEADER_SIZE (4 * sizeof(u32))
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
- /* Maximal size depends on HW family and step */
- if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ /* Maximal size depends on NVM version */
+ if (!mvm->trans->cfg->ext_nvm)
max_section_size = IWL_MAX_NVM_SECTION_SIZE;
else
- max_section_size = IWL_MAX_NVM_8000_SECTION_SIZE;
+ max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
/*
* Obtain NVM image via request_firmware. Since we already used
@@ -447,10 +443,9 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
le32_to_cpu(dword_buff[3]));
/* nvm file validation, dword_buff[2] holds the file version */
- if ((CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP &&
- le32_to_cpu(dword_buff[2]) < 0xE4A) ||
- (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP &&
- le32_to_cpu(dword_buff[2]) >= 0xE4A)) {
+ if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP &&
+ le32_to_cpu(dword_buff[2]) < 0xE4A) {
ret = -EFAULT;
goto out;
}
@@ -472,14 +467,14 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
- if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!mvm->trans->cfg->ext_nvm) {
section_size =
2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
} else {
- section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(
+ section_size = 2 * EXT_NVM_WORD2_LEN(
le16_to_cpu(file_sec->word2));
- section_id = NVM_WORD1_ID_FAMILY_8000(
+ section_id = EXT_NVM_WORD1_ID(
le16_to_cpu(file_sec->word1));
}
@@ -551,12 +546,105 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
return ret;
}
+int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm)
+{
+ struct iwl_nvm_get_info cmd = {};
+ struct iwl_nvm_get_info_rsp *rsp;
+ struct iwl_trans *trans = mvm->trans;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &cmd, },
+ .len = { sizeof(cmd) },
+ .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
+ };
+ int ret;
+ bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (ret)
+ return ret;
+
+ if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
+ "Invalid payload len in NVM response from FW %d",
+ iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (le32_to_cpu(rsp->general.flags)) {
+ IWL_ERR(mvm, "Invalid NVM data from FW\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mvm->nvm_data = kzalloc(sizeof(*mvm->nvm_data) +
+ sizeof(struct ieee80211_channel) *
+ IWL_NUM_CHANNELS, GFP_KERNEL);
+ if (!mvm->nvm_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ iwl_set_hw_address_from_csr(trans, mvm->nvm_data);
+ /* TODO: if platform NVM has MAC address - override it here */
+
+ if (!is_valid_ether_addr(mvm->nvm_data->hw_addr)) {
+ IWL_ERR(trans, "no valid mac address was found\n");
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ IWL_INFO(trans, "base HW address: %pM\n", mvm->nvm_data->hw_addr);
+
+ /* Initialize general data */
+ mvm->nvm_data->nvm_version = le16_to_cpu(rsp->general.nvm_version);
+
+ /* Initialize MAC sku data */
+ mvm->nvm_data->sku_cap_11ac_enable =
+ le32_to_cpu(rsp->mac_sku.enable_11ac);
+ mvm->nvm_data->sku_cap_11n_enable =
+ le32_to_cpu(rsp->mac_sku.enable_11n);
+ mvm->nvm_data->sku_cap_band_24GHz_enable =
+ le32_to_cpu(rsp->mac_sku.enable_24g);
+ mvm->nvm_data->sku_cap_band_52GHz_enable =
+ le32_to_cpu(rsp->mac_sku.enable_5g);
+ mvm->nvm_data->sku_cap_mimo_disabled =
+ le32_to_cpu(rsp->mac_sku.mimo_disable);
+
+ /* Initialize PHY sku data */
+ mvm->nvm_data->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
+ mvm->nvm_data->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
+
+ /* Initialize regulatory data */
+ mvm->nvm_data->lar_enabled =
+ le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported;
+
+ iwl_init_sbands(trans->dev, trans->cfg, mvm->nvm_data,
+ rsp->regulatory.channel_profile,
+ mvm->nvm_data->valid_tx_ant & mvm->fw->valid_tx_ant,
+ mvm->nvm_data->valid_rx_ant & mvm->fw->valid_rx_ant,
+ rsp->regulatory.lar_enabled && lar_fw_supported);
+
+ iwl_free_resp(&hcmd);
+ return 0;
+
+err_free:
+ kfree(mvm->nvm_data);
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
{
int ret, section;
u32 size_read = 0;
u8 *nvm_buffer, *temp;
- const char *nvm_file_B = mvm->cfg->default_nvm_file_B_step;
const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;
if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
@@ -626,14 +714,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
/* read External NVM file from the mod param */
ret = iwl_mvm_read_external_nvm(mvm);
if (ret) {
- /* choose the nvm_file name according to the
- * HW step
- */
- if (CSR_HW_REV_STEP(mvm->trans->hw_rev) ==
- SILICON_B_STEP)
- mvm->nvm_file_name = nvm_file_B;
- else
- mvm->nvm_file_name = nvm_file_C;
+ mvm->nvm_file_name = nvm_file_C;
if ((ret == -EFAULT || ret == -ENOENT) &&
mvm->nvm_file_name) {
@@ -704,6 +785,10 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
+ if (!resp_cp) {
+ resp_cp = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
} else {
struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
@@ -711,21 +796,18 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kzalloc(resp_len, GFP_KERNEL);
-
- if (resp_cp) {
- resp_cp->status = mcc_resp_v1->status;
- resp_cp->mcc = mcc_resp_v1->mcc;
- resp_cp->cap = mcc_resp_v1->cap;
- resp_cp->source_id = mcc_resp_v1->source_id;
- resp_cp->n_channels = mcc_resp_v1->n_channels;
- memcpy(resp_cp->channels, mcc_resp_v1->channels,
- n_channels * sizeof(__le32));
+ if (!resp_cp) {
+ resp_cp = ERR_PTR(-ENOMEM);
+ goto exit;
}
- }
- if (!resp_cp) {
- ret = -ENOMEM;
- goto exit;
+ resp_cp->status = mcc_resp_v1->status;
+ resp_cp->mcc = mcc_resp_v1->mcc;
+ resp_cp->cap = mcc_resp_v1->cap;
+ resp_cp->source_id = mcc_resp_v1->source_id;
+ resp_cp->n_channels = mcc_resp_v1->n_channels;
+ memcpy(resp_cp->channels, mcc_resp_v1->channels,
+ n_channels * sizeof(__le32));
}
status = le32_to_cpu(resp_cp->status);
@@ -745,8 +827,6 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
exit:
iwl_free_resp(&cmd);
- if (ret)
- return ERR_PTR(ret);
return resp_cp;
}
@@ -758,7 +838,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
struct ieee80211_regdomain *regd;
char mcc[3];
- if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ if (mvm->cfg->ext_nvm) {
tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
nvm_lar = mvm->nvm_data->lar_enabled;
@@ -825,8 +905,8 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
return;
- mcc[0] = notif->mcc >> 8;
- mcc[1] = notif->mcc & 0xff;
+ mcc[0] = le16_to_cpu(notif->mcc) >> 8;
+ mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
mcc[2] = '\0';
src = notif->source_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 3da5ec40aaea..2e4bfe9f07ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -172,13 +173,14 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
/*
- * TODO: Bits 7-8 of CSR in 8000 HW family set the ADC sampling, and
- * shouldn't be set to any non-zero value. The same is supposed to be
- * true of the other HW, but unsetting them (such as the 7260) causes
- * automatic tests to fail on seemingly unrelated errors. Need to
- * further investigate this, but for now we'll separate cases.
+ * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
+ * sampling, and shouldn't be set to any non-zero value.
+ * The same is supposed to be true of the other HW, but unsetting
+ * them (such as the 7260) causes automatic tests to fail on seemingly
+ * unrelated errors. Need to further investigate this, but for now
+ * we'll separate cases.
*/
- if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
@@ -483,6 +485,7 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
HCMD_NAME(NVM_ACCESS_COMPLETE),
+ HCMD_NAME(NVM_GET_INFO),
};
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
@@ -588,6 +591,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->fw = fw;
mvm->hw = hw;
+ mvm->init_status = 0;
+
if (iwl_mvm_has_new_rx_api(mvm)) {
op_mode->ops = &iwl_mvm_ops_mq;
trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
@@ -752,7 +757,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
mutex_unlock(&mvm->mutex);
/* returns 0 if successful, 1 if success but in rfkill */
- if (err < 0 && !iwlmvm_mod_params.init_dbg) {
+ if (err < 0) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
goto out_free;
}
@@ -790,12 +795,18 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return op_mode;
out_unregister:
+ if (iwlmvm_mod_params.init_dbg)
+ return op_mode;
+
ieee80211_unregister_hw(mvm->hw);
mvm->hw_registered = false;
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
out_free:
flush_delayed_work(&mvm->fw_dump_wk);
+
+ if (iwlmvm_mod_params.init_dbg)
+ return op_mode;
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
iwl_trans_op_mode_leave(trans);
@@ -820,7 +831,10 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_mvm_thermal_exit(mvm);
- ieee80211_unregister_hw(mvm->hw);
+ if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
+ ieee80211_unregister_hw(mvm->hw);
+ mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
+ }
kfree(mvm->scan_cmd);
kfree(mvm->mcast_filter_cmd);
@@ -835,7 +849,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_phy_db_free(mvm->phy_db);
mvm->phy_db = NULL;
- iwl_free_nvm_data(mvm->nvm_data);
+ kfree(mvm->nvm_data);
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
@@ -1080,6 +1094,16 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
iwl_mvm_start_mac_queues(mvm, mq);
}
+static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
+{
+ bool state = iwl_mvm_is_radio_killed(mvm);
+
+ if (state)
+ wake_up(&mvm->rx_sync_waitq);
+
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+}
+
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
if (state)
@@ -1087,7 +1111,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
else
clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
- wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
+ iwl_mvm_set_rfkill_state(mvm);
}
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
@@ -1100,7 +1124,7 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
else
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
+ iwl_mvm_set_rfkill_state(mvm);
/* iwl_run_init_mvm_ucode is waiting for results, abort it */
if (calibrating)
@@ -1157,9 +1181,13 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
- mvm->fw->dbg_dest_tlv)
+ mvm->fw->dbg_dest_tlv) {
iwl_clear_bits_prph(mvm->trans,
MON_BUFF_SAMPLE_CTL, 0x100);
+ iwl_clear_bits_prph(mvm->trans,
+ MON_BUFF_SAMPLE_CTL, 0x1);
+ iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x1);
+ }
} else {
u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
@@ -1299,7 +1327,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
* for offloading in order to prevent reuse of the same
* qos seq counters.
*/
- if (iwl_mvm_tid_queued(tid_data))
+ if (iwl_mvm_tid_queued(mvm, tid_data))
continue;
if (tid_data->state != IWL_AGG_OFF)
@@ -1449,9 +1477,15 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
synchronize_net();
/* Flush the hw queues, in case something got queued during entry */
- ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), flags);
- if (ret)
- return ret;
+ /* TODO new tx api */
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
+ } else {
+ ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
+ flags);
+ if (ret)
+ return ret;
+ }
/* configure wowlan configuration only if needed */
if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
@@ -1597,9 +1631,6 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
if (ret)
goto out;
- if (!get_status_cmd.resp_pkt)
- goto out;
-
status = (void *)get_status_cmd.resp_pkt->data;
wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
qos_seq = status->qos_seq_ctr;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index aa785cf3cf68..a02dda8d9ea3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2836,7 +2836,11 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
rs_init_optimal_rate(mvm, sta, lq_sta);
- WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
+ WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B,
+ "ant: 0x%x, chains 0x%x, fw tx ant: 0x%x, nvm tx ant: 0x%x\n",
+ rate->ant, lq_sta->pers.chains, mvm->fw->valid_tx_ant,
+ mvm->nvm_data ? mvm->nvm_data->valid_tx_ant : ANT_INVALID);
+
tbl->column = rs_get_column_from_rate(rate);
rs_set_expected_tpt_table(lq_sta, tbl);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index cf48390f6f68..f3e608196369 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -502,7 +502,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
buf->sta_id, sn);
iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
rcu_read_unlock();
- } else if (buf->num_stored) {
+ } else {
/*
* If no frame expired and there are stored frames, index is now
* pointing to the first unexpired frame - modify timer
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 8d1b994ae79f..d48c2ecc0893 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1421,8 +1421,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
- queue_delayed_work(system_wq, &mvm->scan_timeout_dwork,
- msecs_to_jiffies(SCAN_TIMEOUT));
+ schedule_delayed_work(&mvm->scan_timeout_dwork,
+ msecs_to_jiffies(SCAN_TIMEOUT));
return 0;
}
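This hunk (and the matching one in tdls.c further down) is a no-behaviour-change cleanup: schedule_delayed_work() is a thin wrapper that queues on the system workqueue, per its definition in include/linux/workqueue.h:

    static inline bool schedule_delayed_work(struct delayed_work *dwork,
                                             unsigned long delay)
    {
            return queue_delayed_work(system_wq, dwork, delay);
    }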
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 614d67810d05..02f35a929606 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -734,7 +734,6 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
spin_lock_bh(&mvmsta->lock);
mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true;
- mvmsta->tfd_queue_msk |= BIT(queue);
spin_unlock_bh(&mvmsta->lock);
return 0;
@@ -758,7 +757,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
bool using_inactive_queue = false, same_sta = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
- bool shared_queue = false;
+ bool shared_queue = false, inc_ssn;
int ssn;
unsigned long tfd_queue_mask;
int ret;
@@ -885,8 +884,12 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
}
ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
- wdg_timeout);
+ inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
+ ssn, &cfg, wdg_timeout);
+ if (inc_ssn) {
+ ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
+ le16_add_cpu(&hdr->seq_ctrl, 0x10);
+ }
/*
* Mark queue as shared in transport if shared
@@ -898,6 +901,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
spin_lock_bh(&mvmsta->lock);
+ /*
+ * This looks racy, but it is not. We have only one packet for
+ * this ra/tid in our Tx path since we stop the Qdisc when we
+ * need to allocate a new TFD queue.
+ */
+ if (inc_ssn)
+ mvmsta->tid_data[tid].seq_number += 0x10;
mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
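iwl_mvm_enable_txq() now reports whether the transport had to bump the queue's initial SSN; when it does, the frame on the air and the driver's sequence counter must both follow. The 0x10 steps come from the 802.11 Sequence Control layout: fragment number in bits 0-3, sequence number in bits 4-15. A self-contained sketch of the on-air half:

    #include <linux/ieee80211.h>

    /* Advance the sequence number by one: the SN lives in bits 4-15
     * of seq_ctrl, so "+1 SN" is "+0x10 seq_ctrl" (frag untouched). */
    static u16 bump_seq(struct ieee80211_hdr *hdr)
    {
            le16_add_cpu(&hdr->seq_ctrl, 0x10);
            return IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
    }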
@@ -1590,6 +1600,29 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
}
}
+int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+ u16 txq_id;
+
+ spin_lock_bh(&mvm_sta->lock);
+ txq_id = mvm_sta->tid_data[i].txq_id;
+ spin_unlock_bh(&mvm_sta->lock);
+
+ if (txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -1611,11 +1644,17 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (ret)
return ret;
/* flush its queues here since we are freeing mvm_sta */
- ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
+ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
if (ret)
return ret;
- ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
- mvm_sta->tfd_queue_msk);
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
+ } else {
+ u32 q_mask = mvm_sta->tfd_queue_msk;
+
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ q_mask);
+ }
if (ret)
return ret;
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
@@ -1964,8 +2003,6 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvm->probe_queue = queue;
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_dev_queue = queue;
-
- bsta->tfd_queue_msk |= BIT(queue);
}
return 0;
@@ -1975,27 +2012,32 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int queue;
lockdep_assert_held(&mvm->mutex);
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)
- iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
- IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
- if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
- iwl_mvm_disable_txq(mvm, mvm->probe_queue,
- vif->hw_queue[0], IWL_MAX_TID_COUNT,
- 0);
- mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ queue = mvm->probe_queue;
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ queue = mvm->p2p_dev_queue;
+ break;
+ default:
+ WARN(1, "Can't free bcast queue on vif type %d\n",
+ vif->type);
+ return;
}
- if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
- iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
- vif->hw_queue[0], IWL_MAX_TID_COUNT,
- 0);
- mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
- }
+ iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
+ WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
+ mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}
/* Send the FW a request to remove the station from its internal data
@@ -2187,6 +2229,8 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (!iwl_mvm_is_dqa_supported(mvm))
return 0;
+ iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
+
iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
@@ -2496,6 +2540,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data;
+ u16 normalized_ssn;
int txq_id;
int ret;
@@ -2583,7 +2628,15 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->sta_id, tid, txq_id, tid_data->ssn,
tid_data->next_reclaimed);
- if (tid_data->ssn == tid_data->next_reclaimed) {
+ /*
+ * In A000 HW, the next_reclaimed index is only 8 bits, so we'll need
+ * to align the wrap around of ssn so we compare relevant values.
+ */
+ normalized_ssn = tid_data->ssn;
+ if (mvm->trans->cfg->gen2)
+ normalized_ssn &= 0xff;
+
+ if (normalized_ssn == tid_data->next_reclaimed) {
tid_data->state = IWL_AGG_STARTING;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
} else {
@@ -2781,8 +2834,13 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
"ssn = %d, next_recl = %d\n",
tid_data->ssn, tid_data->next_reclaimed);
- /* There are still packets for this RA / TID in the HW */
- if (tid_data->ssn != tid_data->next_reclaimed) {
+ /*
+ * There are still packets for this RA / TID in the HW.
+ * Not relevant for DQA mode, since there is no need to disable
+ * the queue.
+ */
+ if (!iwl_mvm_is_dqa_supported(mvm) &&
+ tid_data->ssn != tid_data->next_reclaimed) {
tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
err = 0;
break;
@@ -2855,10 +2913,18 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (old_state >= IWL_AGG_ON) {
iwl_mvm_drain_sta(mvm, mvmsta, true);
- if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
- IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
- iwl_trans_wait_tx_queues_empty(mvm->trans,
- mvmsta->tfd_queue_msk);
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
+ BIT(tid), 0))
+ IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+ iwl_trans_wait_txq_empty(mvm->trans, txq_id);
+ } else {
+ if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
+ IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+ iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
+ }
+
iwl_mvm_drain_sta(mvm, mvmsta, false);
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
@@ -2937,7 +3003,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
}
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta,
+ u32 sta_id,
struct ieee80211_key_conf *key, bool mcast,
u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
u8 key_offset)
@@ -2955,6 +3021,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
+ if (sta_id == IWL_MVM_INVALID_STA)
+ return -EINVAL;
+
keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
key_flags = cpu_to_le16(keyidx);
@@ -3013,7 +3082,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u.cmd.common.key_offset = key_offset;
u.cmd.common.key_flags = key_flags;
- u.cmd.common.sta_id = mvm_sta->sta_id;
+ u.cmd.common.sta_id = sta_id;
if (new_api) {
u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
@@ -3146,19 +3215,37 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
u8 key_offset,
bool mcast)
{
- struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
int ret;
const u8 *addr;
struct ieee80211_key_seq seq;
u16 p1k[5];
+ u32 sta_id;
+
+ if (sta) {
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ sta_id = mvm_sta->sta_id;
+ } else if (vif->type == NL80211_IFTYPE_AP &&
+ !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ sta_id = mvmvif->mcast_sta.sta_id;
+ } else {
+ IWL_ERR(mvm, "Failed to find station id\n");
+ return -EINVAL;
+ }
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = -EINVAL;
+ break;
+ }
addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
/* get phase 1 key from mac80211 */
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
- ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+ ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
seq.tkip.iv32, p1k, 0, key_offset);
break;
case WLAN_CIPHER_SUITE_CCMP:
@@ -3166,11 +3253,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+ ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
0, NULL, 0, key_offset);
break;
default:
- ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+ ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
0, NULL, 0, key_offset);
}
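The sta_id selection added above encodes one rule: on an AP interface, a key without IEEE80211_KEY_FLAG_PAIRWISE is a group key and is now programmed against the multicast station instead of a peer. Reduced to a predicate (a sketch, not driver code):

    #include <net/mac80211.h>

    /* mac80211 sets IEEE80211_KEY_FLAG_PAIRWISE only on pairwise
     * keys; anything else on an AP vif is a group key. */
    static bool key_goes_to_mcast_sta(struct ieee80211_vif *vif,
                                      struct ieee80211_key_conf *keyconf)
    {
            return vif->type == NL80211_IFTYPE_AP &&
                   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
    }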
@@ -3191,6 +3278,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
int ret, size;
u32 status;
+ if (sta_id == IWL_MVM_INVALID_STA)
+ return -EINVAL;
+
key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK);
key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
@@ -3234,42 +3324,48 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
struct iwl_mvm_sta *mvm_sta;
- u8 sta_id;
+ u8 sta_id = IWL_MVM_INVALID_STA;
int ret;
static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
lockdep_assert_held(&mvm->mutex);
- /* Get the station id from the mvm local station table */
- mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
- if (!mvm_sta) {
- IWL_ERR(mvm, "Failed to find station\n");
- return -EINVAL;
- }
- sta_id = mvm_sta->sta_id;
+ if (vif->type != NL80211_IFTYPE_AP ||
+ keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ /* Get the station id from the mvm local station table */
+ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+ if (!mvm_sta) {
+ IWL_ERR(mvm, "Failed to find station\n");
+ return -EINVAL;
+ }
+ sta_id = mvm_sta->sta_id;
- if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
- ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
- goto end;
- }
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
+ ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
+ false);
+ goto end;
+ }
- /*
- * It is possible that the 'sta' parameter is NULL, and thus
- * there is a need to retrieve the sta from the local station table.
- */
- if (!sta) {
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(sta)) {
- IWL_ERR(mvm, "Invalid station id\n");
- return -EINVAL;
+ /*
+ * It is possible that the 'sta' parameter is NULL, and thus
+ * there is a need to retrieve the sta from the local station
+ * table.
+ */
+ if (!sta) {
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
}
- }
- if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
- return -EINVAL;
+ if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
+ return -EINVAL;
+ }
/* If the key_offset is not pre-assigned, we need to find a
* new offset to use. In normal cases, the offset is not
@@ -3299,8 +3395,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
* to the same key slot (offset).
* If this fails, remove the original as well.
*/
- if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+ sta) {
ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
key_offset, !mcast);
if (ret) {
@@ -3334,6 +3431,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
if (mvm_sta)
sta_id = mvm_sta->sta_id;
+ else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
+ sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
+
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
@@ -3356,7 +3456,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
}
mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
- if (!mvm_sta) {
+ if (sta && !mvm_sta) {
IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
return 0;
}
@@ -3387,7 +3487,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
if (WARN_ON_ONCE(!mvm_sta))
goto unlock;
- iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+ iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
unlock:
@@ -3463,7 +3563,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
return;
}
- n_queued = iwl_mvm_tid_queued(tid_data);
+ n_queued = iwl_mvm_tid_queued(mvm, tid_data);
if (n_queued > remaining) {
more_data = true;
remaining = 0;
@@ -3645,3 +3745,17 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
rcu_read_unlock();
}
+
+u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
+{
+ u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+ /*
+ * In A000 HW, the next_reclaimed index is only 8 bits, so we'll need
+ * to align the wrap around of ssn so we compare relevant values.
+ */
+ if (mvm->trans->cfg->gen2)
+ sn &= 0xff;
+
+ return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
+}
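ieee80211_sn_sub() subtracts sequence numbers modulo 4096. On gen2 (A000) hardware next_reclaimed is an 8-bit counter, so comparing it against the full 12-bit SN over-counts by multiples of 256 once either counter wraps; folding the SN into the same 8-bit space first avoids that. Worked numbers under that assumption:

    /* Sketch of the wrap alignment; ieee80211_sn_sub(a, b) is
     * effectively (a - b) & 0xfff. */
    static u16 queued_frames(u16 sn, u16 next_reclaimed, bool gen2)
    {
            if (gen2)
                    sn &= 0xff;     /* match the 8-bit reclaim counter */
            return (sn - next_reclaimed) & 0xfff;
    }

    /* queued_frames(0x105, 0x05, false) -> 0x100 (256 frames "queued")
     * queued_frames(0x105, 0x05, true)  -> 0     (queue actually empty) */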
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index ad62b67dceb2..05fecbe87da4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -341,12 +341,6 @@ struct iwl_mvm_tid_data {
bool is_tid_active;
};
-static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
-{
- return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
- tid_data->next_reclaimed);
-}
-
struct iwl_mvm_key_pn {
struct rcu_head rcu_head;
struct {
@@ -447,6 +441,8 @@ struct iwl_mvm_sta {
u8 avg_energy;
};
+u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
+
static inline struct iwl_mvm_sta *
iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
{
@@ -489,6 +485,8 @@ static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm,
return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}
+int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta);
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index df7cd87199ea..3d97436bbdf5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -551,8 +551,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
/* retry after a DTIM if we failed sending now */
delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
- queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
- msecs_to_jiffies(delay));
+ schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
out:
mutex_unlock(&mvm->mutex);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 2c12789e7550..3e4fa853b44d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -130,7 +130,10 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* issue as it will have to complete before the next command is
* executed, and a new time event means a new command.
*/
- iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
+ if (iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
+ else
+ iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
}
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
index 16ce8a56b5b9..634175b2480c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
@@ -93,17 +93,21 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm)
cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+ mvm->init_status |= IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
}
void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
{
struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TOF_SUPPORT) ||
+ !(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE))
return;
memset(tof_data, 0, sizeof(*tof_data));
mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+ mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
}
static void iwl_tof_iterator(void *_data, u8 *mac,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 506d58104e1c..453a785a3ea5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -628,7 +628,8 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
mutex_lock(&mvm->mutex);
- if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->cur_ucode != IWL_UCODE_REGULAR) {
ret = -EIO;
goto out;
}
@@ -678,7 +679,8 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
mutex_lock(&mvm->mutex);
- if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->cur_ucode != IWL_UCODE_REGULAR) {
ret = -EIO;
goto out;
}
@@ -792,7 +794,8 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
mutex_lock(&mvm->mutex);
- if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->cur_ucode != IWL_UCODE_REGULAR) {
ret = -EIO;
goto unlock;
}
@@ -884,10 +887,14 @@ void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
iwl_mvm_cooling_device_register(mvm);
iwl_mvm_thermal_zone_register(mvm);
#endif
+ mvm->init_status |= IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
}
void iwl_mvm_thermal_exit(struct iwl_mvm *mvm)
{
+ if (!(mvm->init_status & IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE))
+ return;
+
cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
@@ -895,4 +902,5 @@ void iwl_mvm_thermal_exit(struct iwl_mvm *mvm)
iwl_mvm_cooling_device_unregister(mvm);
iwl_mvm_thermal_zone_unregister(mvm);
#endif
+ mvm->init_status &= ~IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
}
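tof.c above and tt.c here gain the same handshake: the init path sets a bit in mvm->init_status and the exit path returns early unless that bit is set, clearing it on the way out. This makes teardown idempotent and safe after a partially failed start, which the new init_dbg early-returns in ops.c rely on. The skeleton of the pattern, with hypothetical names:

    #include <linux/bits.h>

    #define MY_INIT_COMPLETE BIT(0)         /* hypothetical flag */

    struct my_dev {
            unsigned long init_status;
    };

    static void my_feature_init(struct my_dev *dev)
    {
            /* ... allocate, register ... */
            dev->init_status |= MY_INIT_COMPLETE;
    }

    static void my_feature_exit(struct my_dev *dev)
    {
            if (!(dev->init_status & MY_INIT_COMPLETE))
                    return;                 /* init never completed */
            /* ... unregister, free ... */
            dev->init_status &= ~MY_INIT_COMPLETE;
    }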
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index f21901cd4a4f..c89bb453c496 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -473,15 +473,24 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
if (unlikely(!dev_cmd))
return NULL;
- memset(dev_cmd, 0, sizeof(*dev_cmd));
+ /* Make sure we zero enough of dev_cmd */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
+
+ memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info);
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+ offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+ }
+
/* padding is inserted later in transport */
- /* FIXME - check for AMSDU may need to be removed */
if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
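The removed FIXME is resolved by actually detecting A-MSDUs: on a QoS data frame, bit 7 of the first QoS Control octet marks an A-MSDU, and padding must not be requested for those. The check in isolation (a sketch, not the driver's helper):

    #include <linux/ieee80211.h>

    static bool frame_is_amsdu(struct ieee80211_hdr *hdr)
    {
            u8 *qc;

            if (!ieee80211_is_data_qos(hdr->frame_control))
                    return false;

            qc = ieee80211_get_qos_ctl(hdr);
            return *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
    }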
@@ -642,6 +651,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
info.control.vif->type == NL80211_IFTYPE_STATION &&
queue != mvm->aux_queue) {
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+ } else if (iwl_mvm_is_dqa_supported(mvm) &&
+ info.control.vif->type == NL80211_IFTYPE_MONITOR) {
+ queue = mvm->aux_queue;
}
}
@@ -1120,13 +1132,14 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
struct ieee80211_vif *vif = mvmsta->vif;
+ u16 normalized_ssn;
lockdep_assert_held(&mvmsta->lock);
if ((tid_data->state == IWL_AGG_ON ||
tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
iwl_mvm_is_dqa_supported(mvm)) &&
- iwl_mvm_tid_queued(tid_data) == 0) {
+ iwl_mvm_tid_queued(mvm, tid_data) == 0) {
/*
* Now that this aggregation or DQA queue is empty tell
* mac80211 so it knows we no longer have frames buffered for
@@ -1135,7 +1148,15 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
ieee80211_sta_set_buffered(sta, tid, false);
}
- if (tid_data->ssn != tid_data->next_reclaimed)
+ /*
+ * In A000 HW, the next_reclaimed index is only 8 bits, so we'll need
+ * to align the wrap around of ssn so we compare relevant values.
+ */
+ normalized_ssn = tid_data->ssn;
+ if (mvm->trans->cfg->gen2)
+ normalized_ssn &= 0xff;
+
+ if (normalized_ssn != tid_data->next_reclaimed)
return;
switch (tid_data->state) {
@@ -1313,6 +1334,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct ieee80211_sta *sta;
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int txq_id = SEQ_TO_QUEUE(sequence);
+ /* struct iwl_mvm_tx_resp_v3 is almost the same */
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
@@ -1330,7 +1352,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
__skb_queue_head_init(&skbs);
if (iwl_mvm_has_new_tx_api(mvm))
- txq_id = le16_to_cpu(tx_resp->v6.tx_queue);
+ txq_id = le16_to_cpu(tx_resp->tx_queue);
seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
@@ -1479,7 +1501,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
if (mvmsta->sleep_tx_count) {
mvmsta->sleep_tx_count--;
if (mvmsta->sleep_tx_count &&
- !iwl_mvm_tid_queued(tid_data)) {
+ !iwl_mvm_tid_queued(mvm, tid_data)) {
/*
* The number of frames in the queue
* dropped to 0 even if we sent less
@@ -1860,7 +1882,7 @@ out:
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
- (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);
+ ba_notif->sta_addr, ba_notif->sta_id);
IWL_DEBUG_TX_REPLY(mvm,
"TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
@@ -1883,14 +1905,55 @@ out:
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
{
int ret;
- struct iwl_tx_path_flush_cmd flush_cmd = {
+ struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
.queues_ctl = cpu_to_le32(tfd_msk),
.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
};
+ WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+ sizeof(flush_cmd), &flush_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+ return ret;
+}
+
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
+ u16 tids, u32 flags)
+{
+ int ret;
+ struct iwl_tx_path_flush_cmd flush_cmd = {
+ .sta_id = cpu_to_le32(sta_id),
+ .tid_mask = cpu_to_le16(tids),
+ };
+
+ WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
sizeof(flush_cmd), &flush_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
return ret;
}
+
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
+{
+ struct iwl_mvm_int_sta *int_sta = sta;
+ struct iwl_mvm_sta *mvm_sta = sta;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (internal)
+ return iwl_mvm_flush_sta_tids(mvm, int_sta->sta_id,
+ BIT(IWL_MGMT_TID), flags);
+
+ return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
+ 0xFF, flags);
+ }
+
+ if (internal)
+ return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
+ flags);
+
+ return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
+}
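iwl_mvm_flush_sta() deliberately takes a void pointer plus an `internal` flag instead of two entry points; the flag selects whether the pointer is reinterpreted as an iwl_mvm_int_sta or an iwl_mvm_sta, trading compile-time type checking for a single helper. Call sites from this same patch:

    /* internal stations (bcast/aux/mcast) pass their iwl_mvm_int_sta: */
    iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
    iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);

    /* a regular peer passes its iwl_mvm_sta: */
    iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);

Passing the wrong struct for the flag would silently read the wrong fields, so the bool is doing type selection the compiler cannot check.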
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 8f4f176e204e..0093e78dd571 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -69,6 +69,7 @@
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-prph.h"
+#include "iwl-csr.h"
#include "fw-dbg.h"
#include "mvm.h"
#include "fw-api-rs.h"
@@ -168,11 +169,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
}
pkt = cmd->resp_pkt;
- /* Can happen if RFKILL is asserted */
- if (!pkt) {
- ret = 0;
- goto out_free_resp;
- }
resp_len = iwl_rx_packet_payload_len(pkt);
if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
@@ -502,6 +498,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table;
+ u32 val;
if (mvm->cur_ucode == IWL_UCODE_INIT) {
if (!base)
@@ -520,6 +517,36 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
return;
}
+ /* check if there is a HW error */
+ val = iwl_trans_read_mem32(trans, base);
+ if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
+ int err;
+
+ IWL_ERR(trans, "HW error, resetting before reading\n");
+
+ /* reset the device */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ usleep_range(1000, 2000);
+
+ /* set INIT_DONE flag */
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /* and wait for clock stabilization */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ err = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (err < 0) {
+ IWL_DEBUG_INFO(trans,
+ "Failed to reset the card for the dump\n");
+ return;
+ }
+ }
+
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
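The values tested above are the usual poison patterns a dead device returns on reads (0xa5a5a5a5 and 0x5a5a5a5a, low nibble masked off to tolerate variants); seeing one means the error table would be garbage unless the device is reset and its clocks brought up first. The predicate in isolation:

    /* Does an SRAM read look like bus poison rather than data? */
    static bool looks_like_hw_error(u32 val)
    {
            return (val & ~0xf) == 0xa5a5a5a0 ||
                   (val & ~0xf) == 0x5a5a5a50;
    }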
@@ -730,35 +757,39 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
return queue;
}
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = cfg->frame_limit,
+ .sta_id = cfg->sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = cfg->fifo,
+ .aggregate = cfg->aggregate,
+ .tid = cfg->tid,
+ };
+ bool inc_ssn;
+
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
+ return false;
/* Send the enabling command if we need to */
- if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
- cfg->sta_id, cfg->tid)) {
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_ENABLE_QUEUE,
- .window = cfg->frame_limit,
- .sta_id = cfg->sta_id,
- .ssn = cpu_to_le16(ssn),
- .tx_fifo = cfg->fifo,
- .aggregate = cfg->aggregate,
- .tid = cfg->tid,
- };
-
- iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
- wdg_timeout);
- WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
- sizeof(struct iwl_scd_txq_cfg_cmd),
- &cmd),
- "Failed to configure queue %d on FIFO %d\n", queue,
- cfg->fifo);
- }
+ if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+ cfg->sta_id, cfg->tid))
+ return false;
+
+ inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+ NULL, wdg_timeout);
+ if (inc_ssn)
+ le16_add_cpu(&cmd.ssn, 1);
+
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+ "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+
+ return inc_ssn;
}
int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
@@ -1186,7 +1217,11 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
/* If some TFDs are still queued - don't mark TID as inactive */
- if (iwl_mvm_tid_queued(&mvmsta->tid_data[tid]))
+ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
+ tid_bitmap &= ~BIT(tid);
+
+ /* Don't mark as inactive any TID that has an active BA */
+ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
tid_bitmap &= ~BIT(tid);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e51760e752d4..a8fb77483313 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -538,7 +538,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2722, 0x0A10, iwla000_2ac_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwla000_2ac_cfg_jf)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -672,10 +672,12 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->cfg = cfg_7265d;
}
- if (iwl_trans->cfg->rf_id &&
- (cfg == &iwla000_2ac_cfg_hr || cfg == &iwla000_2ac_cfg_hr_cdb) &&
- iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
- cfg = &iwla000_2ac_cfg_jf;
+ if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb) {
+ if (iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF)
+ cfg = &iwla000_2ac_cfg_jf;
+ else if (iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR)
+ cfg = &iwla000_2ac_cfg_hr;
+
iwl_trans->cfg = cfg;
}
#endif
@@ -764,7 +766,6 @@ static int iwl_pci_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_trans *trans = pci_get_drvdata(pdev);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -781,16 +782,13 @@ static int iwl_pci_resume(struct device *device)
return 0;
/*
- * Enable rfkill interrupt (in order to keep track of
- * the rfkill status). Must be locked to avoid processing
- * a possible rfkill interrupt between reading the state
- * and calling iwl_trans_pcie_rf_kill() with it.
+ * Enable rfkill interrupt (in order to keep track of the rfkill
+ * status). Must be locked to avoid processing a possible rfkill
+ * interrupt while in iwl_trans_check_hw_rf_kill().
*/
mutex_lock(&trans_pcie->mutex);
iwl_enable_rfkill_int(trans);
-
- hw_rfkill = iwl_is_rfkill_set(trans);
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ iwl_trans_check_hw_rf_kill(trans);
mutex_unlock(&trans_pcie->mutex);
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index fd4faaaa1484..1f4bc933f0f2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -403,7 +403,8 @@ struct iwl_trans_pcie {
dma_addr_t ict_tbl_dma;
int ict_index;
bool use_ict;
- bool is_down;
+ bool is_down, opmode_down;
+ bool debug_rfkill;
struct isr_statistics isr_stats;
spinlock_t irq_lock;
@@ -515,7 +516,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
+bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
@@ -675,6 +676,8 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
}
}
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
+
static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_txq *txq)
{
@@ -713,7 +716,12 @@ static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
- lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->mutex);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->mutex);
+
+ if (trans_pcie->debug_rfkill)
+ return true;
return !(iwl_read32(trans, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
@@ -767,9 +775,11 @@ void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans);
+void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
+ bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(const struct iwl_txq *q);
-int iwl_pcie_apm_stop_master(struct iwl_trans *trans);
+void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue);
@@ -779,6 +789,9 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
+#ifdef CONFIG_INET
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+#endif
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 1da2de205cdf..a5c0f69423d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -845,14 +845,14 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
* Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
* Default queue is 0
*/
- iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
- (DEFAULT_RXQ_NUM <<
- RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
+ iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
+ RFH_GEN_CFG_RFH_DMA_SNOOP |
+ RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
RFH_GEN_CFG_SERVICE_DMA_SNOOP |
- (trans->cfg->integrated ?
- RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
- RFH_GEN_CFG_RB_CHUNK_SIZE_128) <<
- RFH_GEN_CFG_RB_CHUNK_SIZE_POS);
+ RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
+ trans->cfg->integrated ?
+ RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
+ RFH_GEN_CFG_RB_CHUNK_SIZE_128));
/* Enable the relevant rx queues */
iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
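RFH_GEN_CFG_VAL() replaces the open-coded shift-and-or with a named-field helper in the mold of FIELD_PREP() from <linux/bitfield.h> (its exact definition lives in iwl-prph.h and is not part of this hunk). The generic shape, with a hypothetical field:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define MY_CFG_RB_CHUNK_SIZE GENMASK(5, 4)      /* hypothetical */

    /* place the value 0x1 into bits 5:4, yielding 0x10 */
    u32 cfg = FIELD_PREP(MY_CFG_RB_CHUNK_SIZE, 0x1);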
@@ -1413,18 +1413,16 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
return;
}
- local_bh_disable();
- /* The STATUS_FW_ERROR bit is set in this function. This must happen
- * before we wake up the command caller, to ensure a proper cleanup. */
- iwl_trans_fw_error(trans);
- local_bh_enable();
-
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
if (!trans_pcie->txq[i])
continue;
del_timer(&trans_pcie->txq[i]->stuck_timer);
}
+ /* The STATUS_FW_ERROR bit is set in this function. This must happen
+ * before we wake up the command caller, to ensure a proper cleanup. */
+ iwl_trans_fw_error(trans);
+
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
wake_up(&trans_pcie->wait_command_queue);
}
@@ -1509,6 +1507,46 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
return inta;
}
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+ bool hw_rfkill, prev, report;
+
+ mutex_lock(&trans_pcie->mutex);
+ prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill) {
+ set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ set_bit(STATUS_RFKILL_HW, &trans->status);
+ }
+ if (trans_pcie->opmode_down)
+ report = hw_rfkill;
+ else
+ report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+
+ IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
+ hw_rfkill ? "disable radio" : "enable radio");
+
+ isr_stats->rfkill++;
+
+ if (prev != report)
+ iwl_trans_pcie_rf_kill(trans, report);
+ mutex_unlock(&trans_pcie->mutex);
+
+ if (hw_rfkill) {
+ if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status))
+ IWL_DEBUG_RF_KILL(trans,
+ "Rfkill while SYNC HCMD in flight\n");
+ wake_up(&trans_pcie->wait_command_queue);
+ } else {
+ clear_bit(STATUS_RFKILL_HW, &trans->status);
+ if (trans_pcie->opmode_down)
+ clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ }
+}
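The single STATUS_RFKILL bit is split in two here: STATUS_RFKILL_HW mirrors the switch itself, while STATUS_RFKILL_OPMODE tracks what was last reported up the stack. While the op_mode is alive the reported state latches on, and only a down op_mode follows the raw switch again, so a brief toggle cannot sneak "radio enabled" past a half-stopped op_mode. The reporting rule as a standalone predicate (a sketch, not driver code):

    /* opmode_bit is STATUS_RFKILL_OPMODE after the hw_rfkill latch
     * in the function above has already been applied */
    static bool rfkill_to_report(bool hw_rfkill, bool opmode_bit,
                                 bool opmode_down)
    {
            return opmode_down ? hw_rfkill : opmode_bit;
    }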
+
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
struct iwl_trans *trans = dev_id;
@@ -1632,30 +1670,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* HW RF KILL switch toggled */
if (inta & CSR_INT_BIT_RF_KILL) {
- bool hw_rfkill;
-
- mutex_lock(&trans_pcie->mutex);
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans->status);
-
- IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
- hw_rfkill ? "disable radio" : "enable radio");
-
- isr_stats->rfkill++;
-
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
- mutex_unlock(&trans_pcie->mutex);
- if (hw_rfkill) {
- if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status))
- IWL_DEBUG_RF_KILL(trans,
- "Rfkill while SYNC HCMD in flight\n");
- wake_up(&trans_pcie->wait_command_queue);
- } else {
- clear_bit(STATUS_RFKILL, &trans->status);
- }
-
+ iwl_pcie_handle_rfkill_irq(trans);
handled |= CSR_INT_BIT_RF_KILL;
}
@@ -1982,31 +1997,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
/* HW RF KILL switch toggled */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
- bool hw_rfkill;
-
- mutex_lock(&trans_pcie->mutex);
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans->status);
-
- IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
- hw_rfkill ? "disable radio" : "enable radio");
-
- isr_stats->rfkill++;
-
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
- mutex_unlock(&trans_pcie->mutex);
- if (hw_rfkill) {
- if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status))
- IWL_DEBUG_RF_KILL(trans,
- "Rfkill while SYNC HCMD in flight\n");
- wake_up(&trans_pcie->wait_command_queue);
- } else {
- clear_bit(STATUS_RFKILL, &trans->status);
- }
- }
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
+ iwl_pcie_handle_rfkill_irq(trans);
if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
IWL_ERR(trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index ac60a282d6de..e84c5ff389a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -150,7 +150,6 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill, was_hw_rfkill;
lockdep_assert_held(&trans_pcie->mutex);
@@ -159,8 +158,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = true;
- was_hw_rfkill = iwl_is_rfkill_set(trans);
-
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
@@ -217,7 +214,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
clear_bit(STATUS_INT_ENABLED, &trans->status);
clear_bit(STATUS_TPOWER_PMI, &trans->status);
- clear_bit(STATUS_RFKILL, &trans->status);
/*
* Even if we stop the HW, we still want the RF kill
@@ -225,26 +221,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
*/
iwl_enable_rfkill_int(trans);
- /*
- * Check again since the RF kill state may have changed while
- * all the interrupts were disabled, in this case we couldn't
- * receive the RF kill interrupt and update the state in the
- * op_mode.
- * Don't call the op_mode if the rkfill state hasn't changed.
- * This allows the op_mode to call stop_device from the rfkill
- * notification without endless recursion. Under very rare
- * circumstances, we might have a small recursion if the rfkill
- * state changed exactly now while we were called from stop_device.
- * This is very unlikely but can happen and is supported.
- */
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans->status);
- else
- clear_bit(STATUS_RFKILL, &trans->status);
- if (hw_rfkill != was_hw_rfkill)
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-
/* re-take ownership to prevent other users from stealing the device */
iwl_pcie_prepare_card_hw(trans);
}
@@ -252,9 +228,13 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool was_in_rfkill;
mutex_lock(&trans_pcie->mutex);
+ trans_pcie->opmode_down = true;
+ was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
_iwl_trans_pcie_gen2_stop_device(trans, low_power);
+ iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
mutex_unlock(&trans_pcie->mutex);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 93cbc7a69bcd..5778ba2278d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -224,9 +224,9 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
- dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
- (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
- trans->ltr_enabled ? "En" : "Dis");
+ IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
+ (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
+ trans->ltr_enabled ? "En" : "Dis");
}
/*
@@ -245,7 +245,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
@@ -448,9 +448,9 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
-int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
+void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
- int ret = 0;
+ int ret;
/* stop device's busmaster DMA activity */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
@@ -462,8 +462,6 @@ int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
IWL_DEBUG_INFO(trans, "stop master\n");
-
- return ret;
}
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
@@ -478,7 +476,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_WAKE_ME);
- else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -892,7 +890,7 @@ monitor:
if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
trans_pcie->fw_mon_phys >> dest->base_shift);
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
(trans_pcie->fw_mon_phys +
trans_pcie->fw_mon_size - 256) >>
@@ -996,14 +994,24 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill = iwl_is_rfkill_set(trans);
+ bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ bool report;
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans->status);
- else
- clear_bit(STATUS_RFKILL, &trans->status);
+ if (hw_rfkill) {
+ set_bit(STATUS_RFKILL_HW, &trans->status);
+ set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ } else {
+ clear_bit(STATUS_RFKILL_HW, &trans->status);
+ if (trans_pcie->opmode_down)
+ clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ }
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+
+ if (prev != report)
+ iwl_trans_pcie_rf_kill(trans, report);
return hw_rfkill;
}
@@ -1128,7 +1136,6 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill, was_hw_rfkill;
lockdep_assert_held(&trans_pcie->mutex);
@@ -1137,8 +1144,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = true;
- was_hw_rfkill = iwl_is_rfkill_set(trans);
-
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
@@ -1199,7 +1204,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
clear_bit(STATUS_INT_ENABLED, &trans->status);
clear_bit(STATUS_TPOWER_PMI, &trans->status);
- clear_bit(STATUS_RFKILL, &trans->status);
/*
* Even if we stop the HW, we still want the RF kill
@@ -1207,26 +1211,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
*/
iwl_enable_rfkill_int(trans);
- /*
- * Check again since the RF kill state may have changed while
- * all the interrupts were disabled, in this case we couldn't
- * receive the RF kill interrupt and update the state in the
- * op_mode.
- * Don't call the op_mode if the rkfill state hasn't changed.
- * This allows the op_mode to call stop_device from the rfkill
- * notification without endless recursion. Under very rare
- * circumstances, we might have a small recursion if the rfkill
- * state changed exactly now while we were called from stop_device.
- * This is very unlikely but can happen and is supported.
- */
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans->status);
- else
- clear_bit(STATUS_RFKILL, &trans->status);
- if (hw_rfkill != was_hw_rfkill)
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-
/* re-take ownership to prevent other users from stealing the device */
iwl_pcie_prepare_card_hw(trans);
}
@@ -1318,7 +1302,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
ret = iwl_pcie_load_given_ucode_8000(trans, fw);
else
ret = iwl_pcie_load_given_ucode(trans, fw);
@@ -1339,12 +1323,45 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
iwl_pcie_tx_start(trans, scd_addr);
}
+void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
+ bool was_in_rfkill)
+{
+ bool hw_rfkill;
+
+ /*
+ * Check again since the RF kill state may have changed while
+ * all the interrupts were disabled, in this case we couldn't
+ * receive the RF kill interrupt and update the state in the
+ * op_mode.
+ * Don't call the op_mode if the rfkill state hasn't changed.
+ * This allows the op_mode to call stop_device from the rfkill
+ * notification without endless recursion. Under very rare
+ * circumstances, we might have a small recursion if the rfkill
+ * state changed exactly now while we were called from stop_device.
+ * This is very unlikely but can happen and is supported.
+ */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill) {
+ set_bit(STATUS_RFKILL_HW, &trans->status);
+ set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ } else {
+ clear_bit(STATUS_RFKILL_HW, &trans->status);
+ clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ }
+ if (hw_rfkill != was_in_rfkill)
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+}
+
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool was_in_rfkill;
mutex_lock(&trans_pcie->mutex);
+ trans_pcie->opmode_down = true;
+ was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
_iwl_trans_pcie_stop_device(trans, low_power);
+ iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
mutex_unlock(&trans_pcie->mutex);
}
@@ -1355,6 +1372,8 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
lockdep_assert_held(&trans_pcie->mutex);
+ IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
+ state ? "disabled" : "enabled");
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
if (trans->cfg->gen2)
_iwl_trans_pcie_gen2_stop_device(trans, true);
@@ -1435,7 +1454,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
@@ -1646,6 +1665,8 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);
+ trans_pcie->opmode_down = false;
+
/* Set is_down to false here so that...*/
trans_pcie->is_down = false;
@@ -1822,7 +1843,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
/*
@@ -2045,17 +2066,52 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
- int cnt;
unsigned long now = jiffies;
+ u8 wr_ptr;
+
+ if (!test_bit(txq_idx, trans_pcie->queue_used))
+ return -EINVAL;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
+ txq = trans_pcie->txq[txq_idx];
+ wr_ptr = ACCESS_ONCE(txq->write_ptr);
+
+ while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
+ !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+ u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
+
+ if (WARN_ONCE(wr_ptr != write_ptr,
+ "WR pointer moved while flushing %d -> %d\n",
+ wr_ptr, write_ptr))
+ return -ETIMEDOUT;
+ usleep_range(1000, 2000);
+ }
+
+ if (txq->read_ptr != txq->write_ptr) {
+ IWL_ERR(trans,
+ "fail to flush all tx fifo queues Q %d\n", txq_idx);
+ iwl_trans_pcie_log_scd_error(trans, txq);
+ return -ETIMEDOUT;
+ }
+
+ IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
+
+ return 0;
+}
+
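
The per-queue flush above follows a common bounded-poll shape: snapshot the producer index, sleep-poll until the consumer catches up or IWL_FLUSH_WAIT_MS elapses, and treat a moving producer as a bug, since nothing should enqueue new frames during a flush. A rough userspace model of that loop (struct queue and the timeout constant are stand-ins, not driver types):

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define FLUSH_WAIT_MS 2000	/* stand-in for IWL_FLUSH_WAIT_MS */

struct queue { volatile unsigned char read_ptr, write_ptr; };

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int wait_queue_empty(struct queue *q)
{
	unsigned char wr = q->write_ptr;	/* snapshot, like wr_ptr */
	long deadline = now_ms() + FLUSH_WAIT_MS;

	while (q->read_ptr != q->write_ptr && now_ms() < deadline) {
		if (q->write_ptr != wr) {
			fprintf(stderr, "WR pointer moved %u -> %u\n",
				wr, q->write_ptr);
			return -ETIMEDOUT;
		}
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
	return q->read_ptr == q->write_ptr ? 0 : -ETIMEDOUT;
}

int main(void)
{
	struct queue q = { .read_ptr = 3, .write_ptr = 3 };

	printf("flush: %d\n", wait_queue_empty(&q)); /* 0: already empty */
	return 0;
}
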
+static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int cnt;
int ret = 0;
/* waiting for all the tx frames complete might take a while */
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
- u8 wr_ptr;
if (cnt == trans_pcie->cmd_queue)
continue;
@@ -2064,34 +2120,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
if (!(BIT(cnt) & txq_bm))
continue;
- IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
- txq = trans_pcie->txq[cnt];
- wr_ptr = ACCESS_ONCE(txq->write_ptr);
-
- while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
- !time_after(jiffies,
- now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
-
- if (WARN_ONCE(wr_ptr != write_ptr,
- "WR pointer moved while flushing %d -> %d\n",
- wr_ptr, write_ptr))
- return -ETIMEDOUT;
- usleep_range(1000, 2000);
- }
-
- if (txq->read_ptr != txq->write_ptr) {
- IWL_ERR(trans,
- "fail to flush all tx fifo queues Q %d\n", cnt);
- ret = -ETIMEDOUT;
+ ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
+ if (ret)
break;
- }
- IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
}
- if (ret)
- iwl_trans_pcie_log_scd_error(trans, txq);
-
return ret;
}
@@ -2393,17 +2426,12 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
-
- char buf[8];
- int buf_size;
u32 reset_flag;
+ int ret;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%x", &reset_flag) != 1)
- return -EFAULT;
+ ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
+ if (ret)
+ return ret;
if (reset_flag == 0)
memset(isr_stats, 0, sizeof(*isr_stats));
@@ -2415,16 +2443,6 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
- char buf[8];
- int buf_size;
- int csr;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &csr) != 1)
- return -EFAULT;
iwl_pcie_dump_csr(trans);
@@ -2449,11 +2467,50 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
return ret;
}
+static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ char buf[100];
+ int pos;
+
+ pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
+ trans_pcie->debug_rfkill,
+ !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool old = trans_pcie->debug_rfkill;
+ int ret;
+
+ ret = kstrtobool_from_user(user_buf, count, &trans_pcie->debug_rfkill);
+ if (ret)
+ return ret;
+ if (old == trans_pcie->debug_rfkill)
+ return count;
+ IWL_WARN(trans, "changing debug rfkill %d->%d\n",
+ old, trans_pcie->debug_rfkill);
+ iwl_pcie_handle_rfkill_irq(trans);
+
+ return count;
+}
+
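
The new rfkill debugfs write relies on kstrtobool_from_user() and triggers iwl_pcie_handle_rfkill_irq() only on an actual change, so echoing the current value is a no-op. A userspace approximation of the boolean forms that helper accepts (sketch only; the real helper additionally performs the user-space copy and NUL termination):

#include <stdbool.h>
#include <stdio.h>

static int parse_bool(const char *s, bool *res)
{
	switch (s[0]) {
	case 'y': case 'Y': case 't': case 'T': case '1':
		*res = true;
		return 0;
	case 'n': case 'N': case 'f': case 'F': case '0':
		*res = false;
		return 0;
	case 'o': case 'O':
		switch (s[1]) {
		case 'n': case 'N': *res = true;  return 0;
		case 'f': case 'F': *res = false; return 0;
		}
		/* fall through */
	default:
		return -1;	/* the kernel helper returns -EINVAL */
	}
}

int main(void)
{
	bool debug_rfkill = false, old = debug_rfkill;

	if (!parse_bool("on\n", &debug_rfkill) && old != debug_rfkill)
		printf("changing debug rfkill %d->%d\n", old, debug_rfkill);
	return 0;
}
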
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
/* Create the debugfs files and directories */
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
@@ -2465,6 +2522,7 @@ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(rfkill, dir, S_IWUSR | S_IRUSR);
return 0;
err:
@@ -2563,8 +2621,15 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
(*data)->len = cpu_to_le32(fh_regs_len);
val = (void *)(*data)->data;
- for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
- *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+ if (!trans->cfg->gen2)
+ for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
+ i += sizeof(u32))
+ *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+ else
+ for (i = FH_MEM_LOWER_BOUND_GEN2; i < FH_MEM_UPPER_BOUND_GEN2;
+ i += sizeof(u32))
+ *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
+ i));
iwl_trans_release_nic_access(trans, &flags);
@@ -2714,7 +2779,7 @@ static struct iwl_trans_dump_data
trans->dbg_dest_tlv->end_shift;
/* Make "end" point to the actual end */
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000 ||
trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
end += (1 << trans->dbg_dest_tlv->end_shift);
monitor_len = end - base;
@@ -2740,7 +2805,12 @@ static struct iwl_trans_dump_data
len += sizeof(*data) + IWL_CSR_TO_DUMP;
/* FH registers */
- len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+ if (trans->cfg->gen2)
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
+ else
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
if (dump_rbs) {
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
@@ -2754,6 +2824,13 @@ static struct iwl_trans_dump_data
(PAGE_SIZE << trans_pcie->rx_page_order));
}
+ /* Paged memory for gen2 HW */
+ if (trans->cfg->gen2)
+ for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
+ len += sizeof(*data) +
+ sizeof(struct iwl_fw_error_dump_paging) +
+ trans_pcie->init_dram.paging[i].size;
+
dump_data = vzalloc(len);
if (!dump_data)
return NULL;
@@ -2793,6 +2870,28 @@ static struct iwl_trans_dump_data
if (dump_rbs)
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
+ /* Paged memory for gen2 HW */
+ if (trans->cfg->gen2) {
+ for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
+ struct iwl_fw_error_dump_paging *paging;
+ dma_addr_t addr =
+ trans_pcie->init_dram.paging[i].physical;
+ u32 page_len = trans_pcie->init_dram.paging[i].size;
+
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+ data->len = cpu_to_le32(sizeof(*paging) + page_len);
+ paging = (void *)data->data;
+ paging->index = cpu_to_le32(i);
+ dma_sync_single_for_cpu(trans->dev, addr, page_len,
+ DMA_BIDIRECTIONAL);
+ memcpy(paging->data,
+ trans_pcie->init_dram.paging[i].block, page_len);
+ data = iwl_fw_error_next_data(data);
+
+ len += sizeof(*data) + sizeof(*paging) + page_len;
+ }
+ }
+
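
The paging dump added above follows the file's two-pass pattern: the first pass sizes the vzalloc() buffer (one header plus payload per paging block), the second writes each block and advances via iwl_fw_error_next_data(). A compact standalone model of why the two passes must agree (types and sizes are made up for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dump_hdr { unsigned int type, len; };	/* stand-in for *data */

int main(void)
{
	unsigned int page_size[3] = { 4096, 4096, 2048 };
	size_t len = 0, off = 0;
	unsigned char *buf;

	/* pass 1: size the allocation, one header per paging block */
	for (int i = 0; i < 3; i++)
		len += sizeof(struct dump_hdr) + page_size[i];
	buf = calloc(1, len);			/* models vzalloc(len) */
	if (!buf)
		return 1;

	/* pass 2: emit header + payload, advancing the cursor */
	for (int i = 0; i < 3; i++) {
		struct dump_hdr hdr = { .type = 9 /* PAGING */,
					.len = page_size[i] };

		memcpy(buf + off, &hdr, sizeof(hdr));
		off += sizeof(hdr);
		memset(buf + off, 0xab, page_size[i]); /* the DMA'd page */
		off += page_size[i];
	}
	printf("sized=%zu written=%zu match=%d\n", len, off, len == off);
	free(buf);
	return 0;
}
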
len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
dump_data->len = len;
@@ -2835,7 +2934,6 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.ref = iwl_trans_pcie_ref, \
.unref = iwl_trans_pcie_unref, \
.dump_data = iwl_trans_pcie_dump_data, \
- .wait_tx_queues_empty = iwl_trans_pcie_wait_txq_empty, \
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume
@@ -2865,6 +2963,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
+ .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
+
.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
};
@@ -2884,6 +2984,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
.txq_free = iwl_trans_pcie_dyn_txq_free,
+ .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -2910,6 +3011,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->trans = trans;
+ trans_pcie->opmode_down = true;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
mutex_init(&trans_pcie->mutex);
@@ -2988,7 +3090,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* "dash" value). To keep hw_rev backwards compatible - we'll store it
* in the old format.
*/
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
unsigned long flags;
trans->hw_rev = (trans->hw_rev & 0xfff0) |
@@ -3032,6 +3134,17 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
+ /*
+ * 9000-series integrated A-step has a problem with suspend/resume
+ * and sometimes even causes the whole platform to get stuck. This
+ * workaround makes the hardware not go into the problematic state.
+ */
+ if (trans->cfg->integrated &&
+ trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+ CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)
+ iwl_set_bit(trans, CSR_HOST_CHICKEN,
+ CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME);
+
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
iwl_pcie_set_interrupt_capa(pdev, trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 9c9bfbbabdf1..a3795ba0d7b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -49,6 +49,7 @@
*
*****************************************************************************/
#include <linux/pm_runtime.h>
+#include <net/tso.h>
#include "iwl-debug.h"
#include "iwl-csr.h"
@@ -226,6 +227,143 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
return idx;
}
+static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd, int start_len,
+ u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+{
+#ifdef CONFIG_INET
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ u16 length, iv_len, amsdu_pad;
+ u8 *start_hdr;
+ struct iwl_tso_hdr_page *hdr_page;
+ struct page **page_ptr;
+ struct tso_t tso;
+
+ /* if the packet is protected, then it must be CCMP or GCMP */
+ iv_len = ieee80211_has_protected(hdr->frame_control) ?
+ IEEE80211_CCMP_HDR_LEN : 0;
+
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
+ &dev_cmd->hdr, start_len, 0);
+
+ ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+ amsdu_pad = 0;
+
+ /* total amount of header we may need for this A-MSDU */
+ hdr_room = DIV_ROUND_UP(total_len, mss) *
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+
+	/* Our device supports 9 segments at most; it will fit in one page */
+ hdr_page = get_page_hdr(trans, hdr_room);
+ if (!hdr_page)
+ return -ENOMEM;
+
+ get_page(hdr_page->page);
+ start_hdr = hdr_page->pos;
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+ *page_ptr = hdr_page->page;
+ memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
+ hdr_page->pos += iv_len;
+
+ /*
+	 * Pull the ieee80211 header + IV to be able to use the TSO core;
+	 * we will restore it for the tx_status flow.
+ */
+ skb_pull(skb, hdr_len + iv_len);
+
+ /*
+ * Remove the length of all the headers that we don't actually
+ * have in the MPDU by themselves, but that we duplicate into
+ * all the different MSDUs inside the A-MSDU.
+ */
+ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+ tso_start(skb, &tso);
+
+ while (total_len) {
+ /* this is the data left for this subframe */
+ unsigned int data_left = min_t(unsigned int, mss, total_len);
+ struct sk_buff *csum_skb = NULL;
+ unsigned int tb_len;
+ dma_addr_t tb_phys;
+ struct tcphdr *tcph;
+ u8 *iph, *subf_hdrs_start = hdr_page->pos;
+
+ total_len -= data_left;
+
+ memset(hdr_page->pos, 0, amsdu_pad);
+ hdr_page->pos += amsdu_pad;
+ amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+ data_left)) & 0x3;
+ ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
+ hdr_page->pos += ETH_ALEN;
+ ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
+ hdr_page->pos += ETH_ALEN;
+
+ length = snap_ip_tcp_hdrlen + data_left;
+ *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
+ hdr_page->pos += sizeof(length);
+
+ /*
+ * This will copy the SNAP as well which will be considered
+ * as MAC header.
+ */
+ tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
+ iph = hdr_page->pos + 8;
+ tcph = (void *)(iph + ip_hdrlen);
+
+ hdr_page->pos += snap_ip_tcp_hdrlen;
+
+ tb_len = hdr_page->pos - start_hdr;
+ tb_phys = dma_map_single(trans->dev, start_hdr,
+ tb_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ dev_kfree_skb(csum_skb);
+ goto out_err;
+ }
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+ trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
+ /* add this subframe's headers' length to the tx_cmd */
+ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
+
+ /* prepare the start_hdr for the next subframe */
+ start_hdr = hdr_page->pos;
+
+ /* put the payload */
+ while (data_left) {
+ tb_len = min_t(unsigned int, tso.size, data_left);
+ tb_phys = dma_map_single(trans->dev, tso.data,
+ tb_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ dev_kfree_skb(csum_skb);
+ goto out_err;
+ }
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+ trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
+ tb_len);
+
+ data_left -= tb_len;
+ tso_build_data(skb, &tso, tb_len);
+ }
+ }
+
+	/* re-add the WiFi header and IV */
+ skb_push(skb, hdr_len + iv_len);
+
+ return 0;
+
+out_err:
+#endif
+ return -EINVAL;
+}
+
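
In the A-MSDU builder above, each subframe is DA/SA/length plus the duplicated SNAP/IP/TCP headers, and amsdu_pad carries the dword padding of the previous subframe into the next iteration. A tiny standalone walk-through of that slicing and padding arithmetic (IPv4/TCP header sizes without options are assumed):

#include <stdio.h>

int main(void)
{
	unsigned int total_len = 3000, mss = 1300;
	unsigned int snap_ip_tcp = 8 + 20 + 20;	/* SNAP + IPv4 + TCP */
	unsigned int eth_hdr = 14, pad = 0, n = 0;

	while (total_len) {
		unsigned int data = total_len < mss ? total_len : mss;

		total_len -= data;
		printf("subframe %u: pad=%u hdr=%u data=%u\n",
		       n++, pad, eth_hdr + snap_ip_tcp, data);
		/* unsigned wrap + mask == mod-4 pad, same as the
		 * driver's (4 - ...) & 0x3 on a u16 */
		pad = (4 - (eth_hdr + snap_ip_tcp + data)) & 0x3;
	}
	return 0;
}
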
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct iwl_txq *txq,
@@ -238,15 +376,21 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct iwl_tfh_tfd *tfd =
iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
dma_addr_t tb_phys;
+ bool amsdu;
int i, len, tb1_len, tb2_len, hdr_len;
void *tb1_addr;
memset(tfd, 0, sizeof(*tfd));
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
/* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
+ if (!amsdu)
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE);
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
@@ -262,7 +406,11 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
- tb1_len = ALIGN(len, 4);
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+ if (amsdu)
+ tb1_len = len;
+ else
+ tb1_len = ALIGN(len, 4);
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -271,8 +419,24 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- /* set up TFD's third entry to point to remainder of skb's head */
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (amsdu) {
+		if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
+ tb1_len + IWL_FIRST_TB_SIZE,
+ hdr_len, dev_cmd))
+ goto out_err;
+
+ /*
+ * building the A-MSDU might have changed this data, so memcpy
+ * it now
+ */
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE);
+ return tfd;
+ }
+
+ /* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
if (tb2_len > 0) {
@@ -303,10 +467,8 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
}
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE + tb1_len,
- skb->data + hdr_len, tb2_len);
- trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len,
- skb->len - hdr_len);
+ IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
+ trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
return tfd;
@@ -699,7 +861,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
}
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans->status)) {
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
ret = -ERFKILL;
goto cancel;
@@ -736,7 +898,7 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans->status)) {
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
cmd->id);
return -ERFKILL;
@@ -912,7 +1074,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
rsp = (void *)hcmd.resp_pkt->data;
qid = le16_to_cpu(rsp->queue_number);
- if (qid > ARRAY_SIZE(trans_pcie->txq)) {
+ if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
WARN_ONCE(1, "queue index %d unsupported", qid);
ret = -EIO;
goto error_free_resp;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 01013d273aa7..de50418adae5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -762,7 +762,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
@@ -1277,13 +1277,14 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
* combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
+bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
int fifo = -1;
+ bool scd_bug = false;
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
@@ -1324,6 +1325,23 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
ssn = txq->read_ptr;
}
+ } else {
+ /*
+ * If we need to move the SCD write pointer by steps of
+		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
+		 * the op_mode know by returning true later.
+		 * Do this only when cfg is NULL, since this trick can
+		 * be done only if we have DQA enabled, which is true for
+		 * mvm only; and mvm never sets a cfg pointer.
+ * This is really ugly, but this is the easiest way out for
+ * this sad hardware issue.
+ * This bug has been fixed on devices 9000 and up.
+ */
+ scd_bug = !trans->cfg->mq_rx_supported &&
+ !((ssn - txq->write_ptr) & 0x3f) &&
+ (ssn != txq->write_ptr);
+ if (scd_bug)
+ ssn++;
}
/* Place first TFD at index corresponding to start sequence number.
@@ -1344,10 +1362,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
iwl_trans_write_mem32(trans,
trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
- ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+ SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
+ SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));
/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
@@ -1369,6 +1385,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
"Activate queue %d WrPtr: %d\n",
txq_id, ssn & 0xff);
}
+
+ return scd_bug;
}
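
The returned scd_bug above fires exactly when the SCD write pointer would need to move by a nonzero multiple of 0x40; with byte-sized queue indices the distance reduces mod 256, so that means steps of 0x40, 0x80 or 0xc0. A quick standalone check of the predicate (u8 arithmetic modeled with unsigned char):

#include <stdio.h>

static int scd_bug(unsigned char ssn, unsigned char wr)
{
	/* nonzero distance that is a multiple of 0x40 hangs the SCD */
	return !((unsigned char)(ssn - wr) & 0x3f) && ssn != wr;
}

int main(void)
{
	/* 0x40 apart: buggy; 0x41 apart: fine; equal: fine (no move) */
	printf("%d %d %d\n",
	       scd_bug(0x40, 0x00), scd_bug(0x41, 0x00),
	       scd_bug(0x00, 0x00));
	return 0;
}
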
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
@@ -1708,7 +1726,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- u8 group_id = iwl_cmd_groupid(pkt->hdr.group_id);
+ u8 group_id;
u32 cmd_id;
int txq_id = SEQ_TO_QUEUE(sequence);
int index = SEQ_TO_INDEX(sequence);
@@ -1734,6 +1752,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
cmd_index = get_cmd_index(txq, index);
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
+ group_id = cmd->hdr.group_id;
cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
iwl_pcie_tfd_unmap(trans, meta, txq, index);
@@ -1876,7 +1895,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans->status)) {
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
ret = -ERFKILL;
goto cancel;
@@ -1913,7 +1932,7 @@ cancel:
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans->status)) {
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
cmd->id);
return -ERFKILL;
@@ -1980,15 +1999,13 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
- skb->data + hdr_len, tb2_len);
- trace_iwlwifi_dev_tx_data(trans->dev, skb,
- hdr_len, skb->len - hdr_len);
+ hdr_len);
+ trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
return 0;
}
#ifdef CONFIG_INET
-static struct iwl_tso_hdr_page *
-get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
@@ -2055,8 +2072,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
trans_pcie->tfd_size,
- &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
- NULL, 0);
+ &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index a9e2b06b3175..2300e796c6ab 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -239,7 +239,6 @@ static int if_sdio_handle_data(struct if_sdio_card *card,
{
int ret;
struct sk_buff *skb;
- char *data;
if (size > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
lbs_deb_sdio("response packet too large (%d bytes)\n",
@@ -256,7 +255,7 @@ static int if_sdio_handle_data(struct if_sdio_card *card,
skb_reserve(skb, NET_IP_ALIGN);
- data = skb_put_data(skb, buffer, size);
+ skb_put_data(skb, buffer, size);
lbs_process_rxed_packet(card->priv, skb);
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index eeeb892219aa..37ace5cb309d 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -233,8 +233,9 @@ static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
memset(&mesh_access, 0, sizeof(mesh_access));
mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET);
- if (!kstrtoul(buf, 10, &retry_limit))
- return -ENOTSUPP;
+ ret = kstrtoul(buf, 10, &retry_limit);
+ if (ret)
+ return ret;
if (retry_limit > 15)
return -ENOTSUPP;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index c20e4944ef87..06ad2d50f9b0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -892,24 +892,20 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
priv->bss_num = mwifiex_get_unused_bss_num(adapter,
MWIFIEX_BSS_TYPE_STA);
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
- priv->bss_type = MWIFIEX_BSS_TYPE_STA;
break;
case NL80211_IFTYPE_P2P_CLIENT:
priv->bss_num = mwifiex_get_unused_bss_num(adapter,
MWIFIEX_BSS_TYPE_P2P);
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
- priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
break;
case NL80211_IFTYPE_P2P_GO:
priv->bss_num = mwifiex_get_unused_bss_num(adapter,
MWIFIEX_BSS_TYPE_P2P);
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
- priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
break;
case NL80211_IFTYPE_AP:
priv->bss_num = mwifiex_get_unused_bss_num(adapter,
MWIFIEX_BSS_TYPE_UAP);
- priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
break;
default:
@@ -1981,7 +1977,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
mwifiex_dbg(priv->adapter, ERROR,
- "Failed to parse secuirty parameters!\n");
+ "Failed to parse security parameters!\n");
goto out;
}
@@ -3031,7 +3027,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
INIT_DELAYED_WORK(&priv->dfs_chan_sw_work,
mwifiex_dfs_chan_sw_work_queue);
- sema_init(&priv->async_sem, 1);
+ mutex_init(&priv->async_mutex);
/* Register network device */
if (register_netdevice(dev)) {
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index 1ff22055e54f..6e2994308526 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -350,7 +350,7 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
}
}
if (i == sband->n_channels) {
- mwifiex_dbg(priv->adapter, ERROR,
+ mwifiex_dbg(priv->adapter, WARN,
"%s: cannot find cfp by band %d\t"
"& channel=%d freq=%d\n",
__func__, band, channel, freq);
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index ae2b69db5994..f6f105a7d3ff 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -1046,6 +1046,5 @@ mwifiex_debugfs_init(void)
void
mwifiex_debugfs_remove(void)
{
- if (mwifiex_dfs_dir)
- debugfs_remove(mwifiex_dfs_dir);
+ debugfs_remove(mwifiex_dfs_dir);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index c37fb2606502..f8cf3079ac7d 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -629,7 +629,7 @@ struct mwifiex_private {
struct dentry *dfs_dev_dir;
#endif
u16 current_key_index;
- struct semaphore async_sem;
+ struct mutex async_mutex;
struct cfg80211_scan_request *scan_request;
u8 cfg_bssid[6];
struct wps wps;
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index ce6936d0c5c0..ae9630b49342 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -2809,7 +2809,7 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
{
int ret;
- if (down_interruptible(&priv->async_sem)) {
+ if (mutex_lock_interruptible(&priv->async_mutex)) {
mwifiex_dbg(priv->adapter, ERROR,
"%s: acquire semaphore fail\n",
__func__);
@@ -2825,7 +2825,7 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
/* Normal scan */
ret = mwifiex_scan_networks(priv, NULL);
- up(&priv->async_sem);
+ mutex_unlock(&priv->async_mutex);
return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 3348fb3a7514..2945775e83c5 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1154,8 +1154,8 @@ static int mwifiex_ret_chan_region_cfg(struct mwifiex_private *priv,
return 0;
}
-int mwifiex_ret_pkt_aggr_ctrl(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp)
+static int mwifiex_ret_pkt_aggr_ctrl(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_pkt_aggr_ctrl *pkt_aggr_ctrl =
&resp->params.pkt_aggr_ctrl;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 37c3bececa1f..b39dbc3d3c1f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -1298,12 +1298,6 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
int ret = 0;
u8 qband;
- cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
- QLINK_CMD_CHANS_INFO_GET,
- sizeof(*cmd));
- if (!cmd_skb)
- return -ENOMEM;
-
switch (band->band) {
case NL80211_BAND_2GHZ:
qband = QLINK_BAND_2GHZ;
@@ -1318,6 +1312,12 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
return -EINVAL;
}
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
+ QLINK_CMD_CHANS_INFO_GET,
+ sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
cmd = (struct qlink_cmd_chans_info_get *)cmd_skb->data;
cmd->band = qband;
ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index f93b27f3a236..7fc4f0d6a9ad 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -247,7 +247,7 @@ static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
{
- int ret;
+ int ret = -ENOMEM;
priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
if (IS_ERR_OR_NULL(priv->sysctl_bar)) {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index f6ac39973b5d..90d7d09a6c63 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -33,9 +33,7 @@ static inline void qtnf_cmd_skb_put_action(struct sk_buff *skb, u16 action)
static inline void
qtnf_cmd_skb_put_buffer(struct sk_buff *skb, const u8 *buf_src, size_t len)
{
- u8 *buf_dst;
-
- buf_dst = skb_put_data(skb, buf_src, len);
+ skb_put_data(skb, buf_src, len);
}
static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb,
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 710e5b447cff..e36ee592c660 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -405,6 +405,10 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
}
+ if (rtlpriv->psc.fwctrl_lps) {
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ }
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
@@ -560,6 +564,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
spin_lock_init(&rtlpriv->locks.waitq_lock);
spin_lock_init(&rtlpriv->locks.entry_list_lock);
spin_lock_init(&rtlpriv->locks.c2hcmd_lock);
+ spin_lock_init(&rtlpriv->locks.scan_list_lock);
spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
spin_lock_init(&rtlpriv->locks.fw_ps_lock);
@@ -568,6 +573,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
/* <5> init list */
INIT_LIST_HEAD(&rtlpriv->entry_list);
INIT_LIST_HEAD(&rtlpriv->c2hcmd_list);
+ INIT_LIST_HEAD(&rtlpriv->scan_list.list);
rtlmac->link_state = MAC80211_NOLINK;
@@ -578,9 +584,12 @@ int rtl_init_core(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rtl_init_core);
+static void rtl_free_entries_from_scan_list(struct ieee80211_hw *hw);
+
void rtl_deinit_core(struct ieee80211_hw *hw)
{
rtl_c2hcmd_launcher(hw, 0);
+ rtl_free_entries_from_scan_list(hw);
}
EXPORT_SYMBOL_GPL(rtl_deinit_core);
@@ -1110,6 +1119,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
if (txrate)
tcb_desc->hw_rate = txrate->hw_value;
+ if (rtl_is_tx_report_skb(hw, skb))
+ tcb_desc->use_spe_rpt = 1;
+
if (ieee80211_is_data(fc)) {
/*
*we set data rate INX 0
@@ -1306,33 +1318,26 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
EXPORT_SYMBOL_GPL(rtl_action_proc);
-static void setup_arp_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc)
+static void setup_special_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc,
+ int type)
{
struct ieee80211_hw *hw = rtlpriv->hw;
rtlpriv->ra.is_special_data = true;
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_special_packet_notify(
- rtlpriv, 1);
+ rtlpriv, type);
rtl_lps_leave(hw);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
-/*should call before software enc*/
-u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
- bool is_enc)
+static const u8 *rtl_skb_ether_type_ptr(struct ieee80211_hw *hw,
+ struct sk_buff *skb, bool is_enc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- __le16 fc = rtl_get_fc(skb);
- u16 ether_type;
u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
u8 encrypt_header_len = 0;
u8 offset;
- const struct iphdr *ip;
-
- if (!ieee80211_is_data(fc))
- goto end;
switch (rtlpriv->sec.pairwise_enc_algorithm) {
case WEP40_ENCRYPTION:
@@ -1352,10 +1357,29 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
offset = mac_hdr_len + SNAP_SIZE;
if (is_enc)
offset += encrypt_header_len;
- ether_type = be16_to_cpup((__be16 *)(skb->data + offset));
+
+ return skb->data + offset;
+}
+
+/*should call before software enc*/
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
+ bool is_enc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ __le16 fc = rtl_get_fc(skb);
+ u16 ether_type;
+ const u8 *ether_type_ptr;
+ const struct iphdr *ip;
+
+ if (!ieee80211_is_data(fc))
+ goto end;
+
+ ether_type_ptr = rtl_skb_ether_type_ptr(hw, skb, is_enc);
+ ether_type = be16_to_cpup((__be16 *)ether_type_ptr);
if (ETH_P_IP == ether_type) {
- ip = (struct iphdr *)((u8 *)skb->data + offset +
+ ip = (struct iphdr *)((u8 *)ether_type_ptr +
PROTOC_TYPE_SIZE);
if (IPPROTO_UDP == ip->protocol) {
struct udphdr *udp = (struct udphdr *)((u8 *)ip +
@@ -1372,13 +1396,15 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
(is_tx) ? "Tx" : "Rx");
if (is_tx)
- setup_arp_tx(rtlpriv, ppsc);
+ setup_special_tx(rtlpriv, ppsc,
+ PACKET_DHCP);
+
return true;
}
}
} else if (ETH_P_ARP == ether_type) {
if (is_tx)
- setup_arp_tx(rtlpriv, ppsc);
+ setup_special_tx(rtlpriv, ppsc, PACKET_ARP);
return true;
} else if (ETH_P_PAE == ether_type) {
@@ -1389,6 +1415,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
rtlpriv->ra.is_special_data = true;
rtl_lps_leave(hw);
ppsc->last_delaylps_stamp_jiffies = jiffies;
+
+ setup_special_tx(rtlpriv, ppsc, PACKET_EAPOL);
}
return true;
@@ -1405,6 +1433,96 @@ end:
}
EXPORT_SYMBOL_GPL(rtl_is_special_data);
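
rtl_skb_ether_type_ptr() factored out above locates the EtherType right after the 802.11 MAC header, the IV (when the frame is encrypted) and the LLC/SNAP header. A standalone sketch of that offset computation (the 6-byte SNAP body before the type field is an assumption about the frame layout; be16_to_cpup is modeled by hand):

#include <stdio.h>

static unsigned short ether_type_at(const unsigned char *frame,
				    unsigned int mac_hdr_len,
				    unsigned int iv_len)
{
	/* LLC/SNAP: AA AA 03 + 3-byte OUI = 6 bytes, then the type */
	const unsigned char *p = frame + mac_hdr_len + iv_len + 6;

	return (unsigned short)((p[0] << 8) | p[1]);	/* be16_to_cpup */
}

int main(void)
{
	unsigned char frame[64] = { 0 };

	frame[24 + 6] = 0x88;	/* 24-byte header, no IV: EAPOL */
	frame[24 + 7] = 0x8e;
	printf("0x%04x\n", ether_type_at(frame, 24, 0));	/* 0x888e */
	return 0;
}
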
+bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ u16 ether_type;
+ const u8 *ether_type_ptr;
+
+ ether_type_ptr = rtl_skb_ether_type_ptr(hw, skb, true);
+ ether_type = be16_to_cpup((__be16 *)ether_type_ptr);
+
+ /* EAPOL */
+ if (ether_type == ETH_P_PAE)
+ return true;
+
+ return false;
+}
+
+static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tx_report *tx_report = &rtlpriv->tx_report;
+ u16 sn;
+
+ sn = atomic_inc_return(&tx_report->sn) & 0x0FFF;
+
+ tx_report->last_sent_sn = sn;
+ tx_report->last_sent_time = jiffies;
+
+ RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+ "Send TX-Report sn=0x%X\n", sn);
+
+ return sn;
+}
+
+void rtl_get_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc,
+ struct ieee80211_hw *hw)
+{
+ if (ptcb_desc->use_spe_rpt) {
+ u16 sn = rtl_get_tx_report_sn(hw);
+
+ SET_TX_DESC_SPE_RPT(pdesc, 1);
+ SET_TX_DESC_SW_DEFINE(pdesc, sn);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl_get_tx_report);
+
+void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tx_report *tx_report = &rtlpriv->tx_report;
+ u16 sn;
+
+ sn = ((tmp_buf[7] & 0x0F) << 8) | tmp_buf[6];
+
+ tx_report->last_recv_sn = sn;
+
+ RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+ "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
+ tmp_buf[0], sn, tmp_buf[2]);
+}
+EXPORT_SYMBOL_GPL(rtl_tx_report_handler);
+
+bool rtl_check_tx_report_acked(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tx_report *tx_report = &rtlpriv->tx_report;
+
+ if (tx_report->last_sent_sn == tx_report->last_recv_sn)
+ return true;
+
+ if (time_before(tx_report->last_sent_time + 3 * HZ, jiffies)) {
+ RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
+ "Check TX-Report timeout!!\n");
+ return true; /* 3 sec. (timeout) seen as acked */
+ }
+
+ return false;
+}
+
+void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i;
+
+ for (i = 0; i < wait_ms; i++) {
+ if (rtl_check_tx_report_acked(hw))
+ break;
+ usleep_range(1000, 2000);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
+ }
+}
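
The TX-report plumbing above stamps a 12-bit sequence number into the descriptor (SET_TX_DESC_SW_DEFINE), matches it against the SN echoed back in the C2H report, and falls back to "acked" after 3 seconds. A standalone model of the SN wrap and match (the byte positions mirror tmp_buf[6]/tmp_buf[7] above; everything else is a stand-in):

#include <stdio.h>

struct tx_report { unsigned short last_sent_sn, last_recv_sn; };

static unsigned short next_sn(unsigned short *counter)
{
	return ++*counter & 0x0fff;	/* 12-bit wrap, like & 0x0FFF */
}

static void send_frame(struct tx_report *r, unsigned short *counter)
{
	r->last_sent_sn = next_sn(counter);	/* stamped into the desc */
}

static void c2h_report(struct tx_report *r, const unsigned char buf[8])
{
	/* firmware echoes the SN in bytes 6..7 (low 12 bits) */
	r->last_recv_sn = ((buf[7] & 0x0f) << 8) | buf[6];
}

int main(void)
{
	struct tx_report r = { 0 };
	unsigned short counter = 0x0ffe;

	send_frame(&r, &counter);	/* sn becomes 0x0fff */
	c2h_report(&r, (unsigned char[8]){ [6] = 0xff, [7] = 0x0f });
	printf("acked=%d\n", r.last_sent_sn == r.last_recv_sn);
	return 0;
}
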
/*********************************************************
*
* functions called by core.c
@@ -1469,6 +1587,7 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_tid_data *tid_data;
struct rtl_sta_info *sta_entry = NULL;
+ u8 reject_agg;
if (sta == NULL)
return -EINVAL;
@@ -1476,6 +1595,14 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
+ if (rtlpriv->cfg->ops->get_btc_status()) {
+ rtlpriv->btcoexist.btc_ops->btc_get_ampdu_cfg(rtlpriv,
+ &reject_agg,
+ NULL, NULL);
+ if (reject_agg)
+ return -EINVAL;
+ }
+
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
if (!sta_entry)
return -ENXIO;
@@ -1530,6 +1657,24 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
return 0;
}
+void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
+{
+ struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
+	u8 reject_agg = 0, ctrl_agg_size = 0, agg_size = 0;
+
+ if (rtlpriv->cfg->ops->get_btc_status())
+ btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
+ &ctrl_agg_size, &agg_size);
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
+ reject_agg, ctrl_agg_size, agg_size);
+
+ rtlpriv->hw->max_rx_aggregation_subframes =
+ (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF);
+}
+EXPORT_SYMBOL(rtl_rx_ampdu_apply);
+
/*********************************************************
*
* wq & timer callback functions
@@ -1564,6 +1709,100 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(rtl_beacon_statistic);
+static void rtl_free_entries_from_scan_list(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_bssid_entry *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &rtlpriv->scan_list.list, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ rtlpriv->scan_list.num--;
+ }
+}
+
+void rtl_scan_list_expire(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_bssid_entry *entry, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtlpriv->locks.scan_list_lock, flags);
+
+ list_for_each_entry_safe(entry, next, &rtlpriv->scan_list.list, list) {
+ /* 180 seconds */
+ if (jiffies_to_msecs(jiffies - entry->age) < 180000)
+ continue;
+
+		list_del(&entry->list);
+		rtlpriv->scan_list.num--;
+
+		RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
+			 "BSSID=%pM is expired in scan list (total=%d)\n",
+			 entry->bssid, rtlpriv->scan_list.num);
+
+		kfree(entry);
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.scan_list_lock, flags);
+
+ rtlpriv->btcoexist.btc_info.ap_num = rtlpriv->scan_list.num;
+}
+
+void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ unsigned long flags;
+
+ struct rtl_bssid_entry *entry;
+ bool entry_found = false;
+
+ /* check if it is scanning */
+ if (!mac->act_scanning)
+ return;
+
+ /* check if this really is a beacon */
+ if (!ieee80211_is_beacon(hdr->frame_control) &&
+ !ieee80211_is_probe_resp(hdr->frame_control))
+ return;
+
+ spin_lock_irqsave(&rtlpriv->locks.scan_list_lock, flags);
+
+ list_for_each_entry(entry, &rtlpriv->scan_list.list, list) {
+ if (memcmp(entry->bssid, hdr->addr3, ETH_ALEN) == 0) {
+ list_del_init(&entry->list);
+ entry_found = true;
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Update BSSID=%pM to scan list (total=%d)\n",
+ hdr->addr3, rtlpriv->scan_list.num);
+ break;
+ }
+ }
+
+ if (!entry_found) {
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+
+ if (!entry)
+ goto label_err;
+
+ memcpy(entry->bssid, hdr->addr3, ETH_ALEN);
+ rtlpriv->scan_list.num++;
+
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Add BSSID=%pM to scan list (total=%d)\n",
+ hdr->addr3, rtlpriv->scan_list.num);
+ }
+
+ entry->age = jiffies;
+
+ list_add_tail(&entry->list, &rtlpriv->scan_list.list);
+
+label_err:
+ spin_unlock_irqrestore(&rtlpriv->locks.scan_list_lock, flags);
+}
+EXPORT_SYMBOL(rtl_collect_scan_list);
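
rtl_collect_scan_list()/rtl_scan_list_expire() above keep a BSSID-keyed list under scan_list_lock: every beacon or probe response refreshes the entry's timestamp and re-queues it at the tail, and the watchdog drops anything older than 180 seconds. A simplified standalone model of the keyed refresh and age-out (a flat array instead of a kernel list_head, wall-clock time instead of jiffies):

#include <stdio.h>
#include <string.h>
#include <time.h>

#define MAX_AP   8
#define EXPIRE_S 180

struct bssid_entry { unsigned char bssid[6]; time_t age; int used; };
static struct bssid_entry scan_list[MAX_AP];

static void collect(const unsigned char *bssid)
{
	int i, free_slot = -1;

	for (i = 0; i < MAX_AP; i++) {
		if (scan_list[i].used &&
		    !memcmp(scan_list[i].bssid, bssid, 6))
			break;			/* known AP: refresh */
		if (!scan_list[i].used && free_slot < 0)
			free_slot = i;
	}
	if (i == MAX_AP) {
		if (free_slot < 0)
			return;			/* list full: drop */
		i = free_slot;
		memcpy(scan_list[i].bssid, bssid, 6);
		scan_list[i].used = 1;
	}
	scan_list[i].age = time(NULL);		/* like entry->age = jiffies */
}

static void expire(void)
{
	for (int i = 0; i < MAX_AP; i++)
		if (scan_list[i].used &&
		    time(NULL) - scan_list[i].age >= EXPIRE_S)
			scan_list[i].used = 0;	/* like list_del + kfree */
}

int main(void)
{
	collect((const unsigned char[6]){ 0, 1, 2, 3, 4, 5 });
	expire();
	printf("entry 0 live: %d\n", scan_list[0].used);
	return 0;
}
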
+
void rtl_watchdog_wq_callback(void *data)
{
struct rtl_works *rtlworks = container_of_dwork_rtl(data,
@@ -1662,12 +1901,20 @@ void rtl_watchdog_wq_callback(void *data)
false;
}
+ /* PS is controlled by coex. */
+ if (rtlpriv->cfg->ops->get_btc_status() &&
+ rtlpriv->btcoexist.btc_ops->btc_is_bt_ctrl_lps(rtlpriv))
+ goto label_lps_done;
+
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2))
rtl_lps_leave(hw);
else
rtl_lps_enter(hw);
+
+label_lps_done:
+ ;
}
rtlpriv->link_info.num_rx_inperiod = 0;
@@ -1713,6 +1960,9 @@ void rtl_watchdog_wq_callback(void *data)
rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv);
rtlpriv->link_info.bcn_rx_inperiod = 0;
+
+ /* <6> scan list */
+ rtl_scan_list_expire(hw);
}
void rtl_watch_dog_timer_callback(unsigned long data)
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index 02ff0c5624a7..ab7d81904d25 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -107,6 +107,11 @@ enum ap_peer {
SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
(GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
+#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
+#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
+
int rtl_init_core(struct ieee80211_hw *hw);
void rtl_deinit_core(struct ieee80211_hw *hw);
void rtl_init_rx_config(struct ieee80211_hw *hw);
@@ -123,7 +128,17 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
bool is_enc);
+bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rtl_get_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc,
+ struct ieee80211_hw *hw);
+void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf,
+ u8 c2h_cmd_len);
+bool rtl_check_tx_report_acked(struct ieee80211_hw *hw);
+void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms);
+
void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rtl_scan_list_expire(struct ieee80211_hw *hw);
int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -134,6 +149,7 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid);
int rtl_rx_agg_stop(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid);
+void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv);
void rtl_watchdog_wq_callback(void *data);
void rtl_fwevt_wq_callback(void *data);
void rtl_c2hcmd_wq_callback(void *data);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
index 39b9a3309cfd..2ac989a4b2bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
@@ -37,6 +37,28 @@
#include "halbtcoutsrc.h"
+/* Interface type */
+#define RT_PCI_INTERFACE 1
+#define RT_USB_INTERFACE 2
+#define RT_SDIO_INTERFACE 3
+#define DEV_BUS_TYPE RT_PCI_INTERFACE
+
+/* IC type */
+#define RTL_HW_TYPE(adapter) (rtl_hal((struct rtl_priv *)adapter)->hw_type)
+
+#define IS_NEW_GENERATION_IC(adapter) \
+ (RTL_HW_TYPE(adapter) >= HARDWARE_TYPE_RTL8192EE)
+#define IS_HARDWARE_TYPE_8812(adapter) \
+ (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8812AE)
+#define IS_HARDWARE_TYPE_8821(adapter) \
+ (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8821AE)
+#define IS_HARDWARE_TYPE_8723A(adapter) \
+ (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8723AE)
+#define IS_HARDWARE_TYPE_8723B(adapter) \
+ (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8723BE)
+#define IS_HARDWARE_TYPE_8192E(adapter) \
+ (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8192EE)
+
#include "halbtc8192e2ant.h"
#include "halbtc8723b1ant.h"
#include "halbtc8723b2ant.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 9015512ed647..44c25724529e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -3194,7 +3194,7 @@ void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
btc8192e2ant_run_coexist_mechanism(btcoexist);
}
-void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
index a57d6947eaf7..65502acee52c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
@@ -164,20 +164,20 @@ struct coex_sta_8192e_2ant {
/****************************************************************
* The following is interface which will notify coex module.
****************************************************************/
-void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist);
-void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist);
-void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
+void ex_btc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_btc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length);
+void ex_btc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
u8 type);
-void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
- u8 type);
-void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
- u8 *tmpbuf, u8 length);
-void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
- u8 type);
-void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist);
-void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist);
-void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8192e2ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist);
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index a0f3a18add25..03998d2e9eb8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -2390,9 +2390,9 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
}
/**************************************************************
- * extern function start with ex_halbtc8723b1ant_
+ * extern function start with ex_btc8723b1ant_
**************************************************************/
-void ex_halbtc8723b1ant_power_on_setting(struct btc_coexist *btcoexist)
+void ex_btc8723b1ant_power_on_setting(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_board_info *board_info = &btcoexist->board_info;
@@ -2462,14 +2462,14 @@ void ex_halbtc8723b1ant_power_on_setting(struct btc_coexist *btcoexist)
}
-void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist,
- bool wifi_only)
+void ex_btc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist,
+ bool wifi_only)
{
halbtc8723b1ant_init_hw_config(btcoexist, true, wifi_only);
btcoexist->stop_coex_dm = false;
}
-void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
+void ex_btc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -2483,7 +2483,7 @@ void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
halbtc8723b1ant_query_bt_info(btcoexist);
}
-void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
@@ -2758,7 +2758,7 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
}
-void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -2787,7 +2787,7 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -2805,7 +2805,7 @@ void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_connected = false, bt_hs_on = false;
@@ -2891,7 +2891,7 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_connected = false, bt_hs_on = false;
@@ -2961,8 +2961,8 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
- u8 type)
+void ex_btc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[3] = {0};
@@ -3042,8 +3042,8 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
-void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
- u8 type)
+void ex_btc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
bool bt_hs_on = false;
@@ -3119,8 +3119,8 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
}
}
-void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
- u8 *tmp_buf, u8 length)
+void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 bt_info = 0;
@@ -3211,11 +3211,11 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
- ex_halbtc8723b1ant_media_status_notify(btcoexist,
- BTC_MEDIA_CONNECT);
+ ex_btc8723b1ant_media_status_notify(btcoexist,
+ BTC_MEDIA_CONNECT);
else
- ex_halbtc8723b1ant_media_status_notify(btcoexist,
- BTC_MEDIA_DISCONNECT);
+ ex_btc8723b1ant_media_status_notify(btcoexist,
+ BTC_MEDIA_DISCONNECT);
}
if (coex_sta->bt_info_ext & BIT3) {
@@ -3364,7 +3364,7 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
halbtc8723b1ant_run_coexist_mechanism(btcoexist);
}
-void ex_halbtc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u32 u32tmp;
@@ -3401,7 +3401,7 @@ void ex_halbtc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
+void ex_btc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3418,12 +3418,12 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
0x0, 0x0);
halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
- ex_halbtc8723b1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+ ex_btc8723b1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
btcoexist->stop_coex_dm = true;
}
-void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3458,7 +3458,7 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
}
}
-void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
+void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3469,7 +3469,7 @@ void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
halbtc8723b1ant_init_coex_dm(btcoexist);
}
-void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
+void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
index 506961a1ca56..8d4fde235e11 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
@@ -200,24 +200,24 @@ struct coex_sta_8723b_1ant {
/*************************************************************************
* The following is interface which will notify coex module.
*************************************************************************/
-void ex_halbtc8723b1ant_power_on_setting(struct btc_coexist *btcoexist);
-void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist,
- bool wifi_only);
-void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist);
-void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
- u8 type);
-void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
- u8 type);
-void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
- u8 *tmpbuf, u8 length);
-void ex_halbtc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist,
+void ex_btc8723b1ant_power_on_setting(struct btc_coexist *btcoexist);
+void ex_btc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist,
+ bool wifi_only);
+void ex_btc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
u8 type);
-void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist);
-void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
-void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist);
-void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist);
-void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length);
+void ex_btc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_btc8723b1ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist);
+void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist);
+void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
index 50726beaeead..bc1e3042e271 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
@@ -196,5 +196,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist);
void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void ex_btc8723b2ant_pre_load_firmware(struct btc_coexist *btcoexist);
+void ex_btc8723b2ant_power_on_setting(struct btc_coexist *btcoexist);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
index cb32e7a64ae6..b0a6626fbb66 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
@@ -170,21 +170,22 @@ struct coex_sta_8821a_1ant {
* The following is interface which will notify coex module.
*===========================================
*/
-void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist);
-void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist);
-void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
-void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
- u8 type);
-void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
- u8 type);
-void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
- u8 *tmpbuf, u8 length);
-void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist);
-void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
-void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist);
-void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist);
-void ex_halbtc8821a1ant_dbg_control(struct btc_coexist *btcoexist, u8 op_code,
- u8 op_len, u8 *data);
+void ex_btc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist,
+ bool wifi_only);
+void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_btc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length);
+void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist);
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8821a1ant_dbg_control(struct btc_coexist *btcoexist, u8 op_code,
+ u8 op_len, u8 *data);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
index a1603e2d44e3..a839d5574422 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
@@ -173,58 +173,60 @@ struct coex_sta_8821a_2ant {
*===========================================
*/
void
-ex_halbtc8821a2ant_init_hwconfig(
+ex_btc8821a2ant_init_hwconfig(
struct btc_coexist *btcoexist
);
void
-ex_halbtc8821a2ant_init_coex_dm(
+ex_btc8821a2ant_init_coex_dm(
struct btc_coexist *btcoexist
);
void
-ex_halbtc8821a2ant_ips_notify(
+ex_btc8821a2ant_ips_notify(
struct btc_coexist *btcoexist,
u8 type
);
void
-ex_halbtc8821a2ant_lps_notify(
+ex_btc8821a2ant_lps_notify(
struct btc_coexist *btcoexist,
u8 type
);
void
-ex_halbtc8821a2ant_scan_notify(
+ex_btc8821a2ant_scan_notify(
struct btc_coexist *btcoexist,
u8 type
);
void
-ex_halbtc8821a2ant_connect_notify(
+ex_btc8821a2ant_connect_notify(
struct btc_coexist *btcoexist,
u8 type
);
void
-ex_halbtc8821a2ant_media_status_notify(
+ex_btc8821a2ant_media_status_notify(
struct btc_coexist *btcoexist,
u8 type
);
void
-ex_halbtc8821a2ant_special_packet_notify(
+ex_btc8821a2ant_special_packet_notify(
struct btc_coexist *btcoexist,
u8 type
);
void
-ex_halbtc8821a2ant_bt_info_notify(
+ex_btc8821a2ant_bt_info_notify(
struct btc_coexist *btcoexist,
u8 *tmp_buf,
u8 length
);
void
-ex_halbtc8821a2ant_halt_notify(
+ex_btc8821a2ant_halt_notify(
struct btc_coexist *btcoexist
);
void
-ex_halbtc8821a2ant_periodical(
+ex_btc8821a2ant_periodical(
struct btc_coexist *btcoexist
);
void
-ex_halbtc8821a2ant_display_coex_info(
+ex_btc8821a2ant_display_coex_info(
struct btc_coexist *btcoexist
);
+void ex_btc8821a2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void ex_btc8821a2ant_pre_load_firmware(struct btc_coexist *btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index f13000612913..e6024b013ca5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -36,6 +36,20 @@ u32 btc_dbg_type[BTC_MSG_MAX];
/***************************************************
* Debug related function
***************************************************/
+
+const char *const gl_btc_wifi_bw_string[] = {
+ "11bg",
+ "HT20",
+ "HT40",
+ "HT80",
+ "HT160"
+};
+
+const char *const gl_btc_wifi_freq_string[] = {
+ "2.4G",
+ "5G"
+};
+
static bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist)
{
if (!btcoexist->binded || NULL == btcoexist->adapter)
@@ -54,28 +68,39 @@ static bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv)
static void halbtc_dbg_init(void)
{
- u8 i;
-
- for (i = 0; i < BTC_MSG_MAX; i++)
- btc_dbg_type[i] = 0;
-
- btc_dbg_type[BTC_MSG_INTERFACE] =
-/* INTF_INIT | */
-/* INTF_NOTIFY | */
- 0;
+}
- btc_dbg_type[BTC_MSG_ALGORITHM] =
-/* ALGO_BT_RSSI_STATE | */
-/* ALGO_WIFI_RSSI_STATE | */
-/* ALGO_BT_MONITOR | */
-/* ALGO_TRACE | */
-/* ALGO_TRACE_FW | */
-/* ALGO_TRACE_FW_DETAIL | */
-/* ALGO_TRACE_FW_EXEC | */
-/* ALGO_TRACE_SW | */
-/* ALGO_TRACE_SW_DETAIL | */
-/* ALGO_TRACE_SW_EXEC | */
- 0;
+/***************************************************
+ * helper function
+ ***************************************************/
+static bool is_any_client_connect_to_ap(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_sta_info *drv_priv;
+ u8 cnt = 0;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT ||
+ mac->opmode == NL80211_IFTYPE_AP) {
+ if (in_interrupt()) {
+ list_for_each_entry(drv_priv, &rtlpriv->entry_list,
+ list) {
+ cnt++;
+ }
+ } else {
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_for_each_entry(drv_priv, &rtlpriv->entry_list,
+ list) {
+ cnt++;
+ }
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+ }
+ }
+ if (cnt > 0)
+ return true;
+ else
+ return false;
}
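/*
 * Editor's sketch, not part of this patch: since the caller only needs
 * to know whether *any* station entry exists, the locked count above
 * can be reduced to a list_empty() check. The function name is
 * illustrative; the in_interrupt() special case above is omitted and
 * process/softirq context is assumed.
 */
static bool any_client_connected_sketch(struct rtl_priv *rtlpriv)
{
	bool found;

	spin_lock_bh(&rtlpriv->locks.entry_list_lock);
	found = !list_empty(&rtlpriv->entry_list);
	spin_unlock_bh(&rtlpriv->locks.entry_list_lock);

	return found;
}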
static bool halbtc_is_bt40(struct rtl_priv *adapter)
@@ -188,12 +213,14 @@ static void halbtc_leave_lps(struct btc_coexist *btcoexist)
&ap_enable);
if (ap_enable) {
- pr_info("halbtc_leave_lps()<--dont leave lps under AP mode\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "%s()<--dont leave lps under AP mode\n", __func__);
return;
}
btcoexist->bt_info.bt_ctrl_lps = true;
btcoexist->bt_info.bt_lps_on = false;
+ rtl_lps_leave(rtlpriv->mac80211.hw);
}
static void halbtc_enter_lps(struct btc_coexist *btcoexist)
@@ -209,36 +236,93 @@ static void halbtc_enter_lps(struct btc_coexist *btcoexist)
&ap_enable);
if (ap_enable) {
- pr_info("halbtc_enter_lps()<--dont enter lps under AP mode\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "%s()<--dont enter lps under AP mode\n", __func__);
return;
}
btcoexist->bt_info.bt_ctrl_lps = true;
- btcoexist->bt_info.bt_lps_on = false;
+ btcoexist->bt_info.bt_lps_on = true;
+ rtl_lps_enter(rtlpriv->mac80211.hw);
}
static void halbtc_normal_lps(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv;
+
+ rtlpriv = btcoexist->adapter;
+
if (btcoexist->bt_info.bt_ctrl_lps) {
btcoexist->bt_info.bt_lps_on = false;
+ rtl_lps_leave(rtlpriv->mac80211.hw);
btcoexist->bt_info.bt_ctrl_lps = false;
}
}
-static void halbtc_leave_low_power(void)
+static void halbtc_leave_low_power(struct btc_coexist *btcoexist)
{
}
-static void halbtc_nomal_low_power(void)
+static void halbtc_normal_low_power(struct btc_coexist *btcoexist)
{
}
-static void halbtc_disable_low_power(void)
+static void halbtc_disable_low_power(struct btc_coexist *btcoexist,
+ bool low_pwr_disable)
{
+ /* TODO: original/leave 32k low power */
+ btcoexist->bt_info.bt_disable_low_pwr = low_pwr_disable;
}
-static void halbtc_aggregation_check(void)
+static void halbtc_aggregation_check(struct btc_coexist *btcoexist)
{
+ bool need_to_act = false;
+ static unsigned long pre_time;
+ unsigned long cur_time = 0;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ /* To avoid a continuous deleteBA=>addBA=>deleteBA=>addBA loop,
+ * this function must not be called back to back;
+ * it may run again only after 8 seconds have elapsed
+ */
+
+ cur_time = jiffies;
+ if (jiffies_to_msecs(cur_time - pre_time) <= 8000) {
+ /* not yet 8 seconds since the last run; bail out */
+ return;
+ }
+ pre_time = cur_time;
+
+ if (btcoexist->bt_info.reject_agg_pkt) {
+ need_to_act = true;
+ btcoexist->bt_info.pre_reject_agg_pkt =
+ btcoexist->bt_info.reject_agg_pkt;
+ } else {
+ if (btcoexist->bt_info.pre_reject_agg_pkt) {
+ need_to_act = true;
+ btcoexist->bt_info.pre_reject_agg_pkt =
+ btcoexist->bt_info.reject_agg_pkt;
+ }
+
+ if (btcoexist->bt_info.pre_bt_ctrl_agg_buf_size !=
+ btcoexist->bt_info.bt_ctrl_agg_buf_size) {
+ need_to_act = true;
+ btcoexist->bt_info.pre_bt_ctrl_agg_buf_size =
+ btcoexist->bt_info.bt_ctrl_agg_buf_size;
+ }
+
+ if (btcoexist->bt_info.bt_ctrl_agg_buf_size) {
+ if (btcoexist->bt_info.pre_agg_buf_size !=
+ btcoexist->bt_info.agg_buf_size) {
+ need_to_act = true;
+ }
+ btcoexist->bt_info.pre_agg_buf_size =
+ btcoexist->bt_info.agg_buf_size;
+ }
+
+ if (need_to_act)
+ rtl_rx_ampdu_apply(rtlpriv);
+ }
}
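/*
 * Editor's sketch, not part of this patch: the 8-second guard above can
 * also be written with the wrap-safe jiffies helpers from
 * <linux/jiffies.h>. The function name is illustrative.
 */
static bool can_run_again_sketch(void)
{
	static unsigned long next_run;	/* jiffies of next allowed run */

	if (time_before(jiffies, next_run))
		return false;		/* called again within 8 seconds */

	next_run = jiffies + 8 * HZ;
	return true;
}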
static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
@@ -246,10 +330,37 @@ static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
return 0;
}
-static s32 halbtc_get_wifi_rssi(struct rtl_priv *adapter)
+u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist)
{
- struct rtl_priv *rtlpriv = adapter;
- s32 undec_sm_pwdb = 0;
+ /* return value:
+ * [31:16] => number of connected ports
+ * [15:0] => per-port connected bits (WIFI_*_CONNECTED)
+ */
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u32 ret_val = 0;
+ u32 port_connected_status = 0, num_of_connected_port = 0;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION &&
+ mac->link_state >= MAC80211_LINKED) {
+ port_connected_status |= WIFI_STA_CONNECTED;
+ num_of_connected_port++;
+ }
+ /* AP & ADHOC & MESH */
+ if (is_any_client_connect_to_ap(btcoexist)) {
+ port_connected_status |= WIFI_AP_CONNECTED;
+ num_of_connected_port++;
+ }
+ /* TODO: P2P Connected Status */
+
+ ret_val = (num_of_connected_port << 16) | port_connected_status;
+
+ return ret_val;
+}
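/*
 * Editor's sketch, not part of this patch: helpers (hypothetical names)
 * that unpack the link-status word built above.
 */
static inline u16 link_status_num_ports(u32 link_status)
{
	return link_status >> 16;	/* [31:16]: number of connected ports */
}

static inline u16 link_status_port_bits(u32 link_status)
{
	return link_status & 0xffff;	/* [15:0]: WIFI_*_CONNECTED bits */
}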
+
+static s32 halbtc_get_wifi_rssi(struct rtl_priv *rtlpriv)
+{
+ int undec_sm_pwdb = 0;
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
@@ -270,6 +381,7 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
u32 *u32_tmp = (u32 *)out_buf;
u8 *u8_tmp = (u8 *)out_buf;
bool tmp = false;
+ bool ret = true;
if (!halbtc_is_bt_coexist_available(btcoexist))
return false;
@@ -277,12 +389,17 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
switch (get_type) {
case BTC_GET_BL_HS_OPERATION:
*bool_tmp = false;
+ ret = false;
break;
case BTC_GET_BL_HS_CONNECTING:
*bool_tmp = false;
+ ret = false;
break;
case BTC_GET_BL_WIFI_CONNECTED:
- if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION &&
+ rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ tmp = true;
+ if (is_any_client_connect_to_ap(btcoexist))
tmp = true;
*bool_tmp = tmp;
break;
@@ -304,32 +421,26 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
else
*bool_tmp = false;
break;
- case BTC_GET_BL_WIFI_ROAM: /*TODO*/
+ case BTC_GET_BL_WIFI_ROAM:
if (mac->link_state == MAC80211_LINKING)
*bool_tmp = true;
else
*bool_tmp = false;
break;
- case BTC_GET_BL_WIFI_4_WAY_PROGRESS: /*TODO*/
- *bool_tmp = false;
-
+ case BTC_GET_BL_WIFI_4_WAY_PROGRESS:
+ *bool_tmp = rtlpriv->btcoexist.btc_info.in_4way;
break;
case BTC_GET_BL_WIFI_UNDER_5G:
- *bool_tmp = false; /*TODO*/
-
- case BTC_GET_BL_WIFI_DHCP: /*TODO*/
- break;
- case BTC_GET_BL_WIFI_SOFTAP_IDLE:
- *bool_tmp = true;
- break;
- case BTC_GET_BL_WIFI_SOFTAP_LINKING:
- *bool_tmp = false;
- break;
- case BTC_GET_BL_WIFI_IN_EARLY_SUSPEND:
- *bool_tmp = false;
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
break;
case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
- *bool_tmp = false;
+ if (mac->opmode == NL80211_IFTYPE_AP)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
break;
case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
if (NO_ENCRYPTION == rtlpriv->sec.pairwise_enc_algorithm)
@@ -338,16 +449,26 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
*bool_tmp = true;
break;
case BTC_GET_BL_WIFI_UNDER_B_MODE:
- *bool_tmp = false; /*TODO*/
+ if (rtlpriv->mac80211.mode == WIRELESS_MODE_B)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
break;
case BTC_GET_BL_EXT_SWITCH:
*bool_tmp = false;
break;
+ case BTC_GET_BL_WIFI_IS_IN_MP_MODE:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_IS_ASUS_8723B:
+ *bool_tmp = false;
+ break;
case BTC_GET_S4_WIFI_RSSI:
*s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
break;
- case BTC_GET_S4_HS_RSSI: /*TODO*/
- *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+ case BTC_GET_S4_HS_RSSI:
+ *s32_tmp = 0;
+ ret = false;
break;
case BTC_GET_U4_WIFI_BW:
*u32_tmp = halbtc_get_wifi_bw(btcoexist);
@@ -359,7 +480,10 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
*u32_tmp = BTC_WIFI_TRAFFIC_RX;
break;
case BTC_GET_U4_WIFI_FW_VER:
- *u32_tmp = rtlhal->fw_version;
+ *u32_tmp = (rtlhal->fw_version << 16) | rtlhal->fw_subversion;
+ break;
+ case BTC_GET_U4_WIFI_LINK_STATUS:
+ *u32_tmp = halbtc_get_wifi_link_status(btcoexist);
break;
case BTC_GET_U4_BT_PATCH_VER:
*u32_tmp = halbtc_get_bt_patch_version(btcoexist);
@@ -374,10 +498,17 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
*u8_tmp = halbtc_get_wifi_central_chnl(btcoexist);
break;
case BTC_GET_U1_WIFI_HS_CHNL:
- *u8_tmp = 1;/*BT_OperateChnl(rtlpriv);*/
+ *u8_tmp = 0;
+ ret = false;
break;
- case BTC_GET_U1_MAC_PHY_MODE:
- *u8_tmp = BTC_MP_UNKNOWN;
+ case BTC_GET_U1_AP_NUM:
+ *u8_tmp = rtlpriv->btcoexist.btc_info.ap_num;
+ break;
+ case BTC_GET_U1_ANT_TYPE:
+ *u8_tmp = (u8)BTC_ANT_TYPE_0;
+ break;
+ case BTC_GET_U1_IOT_PEER:
+ *u8_tmp = 0;
break;
/************* 1Ant **************/
@@ -386,10 +517,11 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
break;
default:
+ ret = false;
break;
}
- return true;
+ return ret;
}
static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
@@ -398,6 +530,7 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
bool *bool_tmp = (bool *)in_buf;
u8 *u8_tmp = (u8 *)in_buf;
u32 *u32_tmp = (u32 *)in_buf;
+ bool ret = true;
if (!halbtc_is_bt_coexist_available(btcoexist))
return false;
@@ -420,11 +553,17 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
btcoexist->bt_info.reject_agg_pkt = *bool_tmp;
break;
case BTC_SET_BL_BT_CTRL_AGG_SIZE:
- btcoexist->bt_info.bt_ctrl_buf_size = *bool_tmp;
+ btcoexist->bt_info.bt_ctrl_agg_buf_size = *bool_tmp;
break;
case BTC_SET_BL_INC_SCAN_DEV_NUM:
btcoexist->bt_info.increase_scan_dev_num = *bool_tmp;
break;
+ case BTC_SET_BL_BT_TX_RX_MASK:
+ btcoexist->bt_info.bt_tx_rx_mask = *bool_tmp;
+ break;
+ case BTC_SET_BL_MIRACAST_PLUS_BT:
+ btcoexist->bt_info.miracast_plus_bt = *bool_tmp;
+ break;
/* set some u1Byte type variables. */
case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp;
@@ -432,25 +571,25 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
case BTC_SET_U1_AGG_BUF_SIZE:
btcoexist->bt_info.agg_buf_size = *u8_tmp;
break;
- /* the following are some action which will be triggered */
+
+ /* the following are some actions which will be triggered */
case BTC_SET_ACT_GET_BT_RSSI:
- /*BTHCI_SendGetBtRssiEvent(rtlpriv);*/
+ ret = false;
break;
case BTC_SET_ACT_AGGREGATE_CTRL:
- halbtc_aggregation_check();
+ halbtc_aggregation_check(btcoexist);
break;
- /* 1Ant */
+ /* 1Ant */
case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp;
break;
case BTC_SET_UI_SCAN_SIG_COMPENSATION:
- /* rtlpriv->mlmepriv.scan_compensation = *u8_tmp; */
break;
- case BTC_SET_U1_1ANT_LPS:
+ case BTC_SET_U1_LPS_VAL:
btcoexist->bt_info.lps_val = *u8_tmp;
break;
- case BTC_SET_U1_1ANT_RPWM:
+ case BTC_SET_U1_RPWM_VAL:
btcoexist->bt_info.rpwm_val = *u8_tmp;
break;
/* the following are some action which will be triggered */
@@ -464,41 +603,24 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
halbtc_normal_lps(btcoexist);
break;
case BTC_SET_ACT_DISABLE_LOW_POWER:
- halbtc_disable_low_power();
+ halbtc_disable_low_power(btcoexist, *bool_tmp);
break;
case BTC_SET_ACT_UPDATE_RAMASK:
btcoexist->bt_info.ra_mask = *u32_tmp;
break;
case BTC_SET_ACT_SEND_MIMO_PS:
break;
- case BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT:
- btcoexist->bt_info.force_exec_pwr_cmd_cnt++;
- break;
case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/
break;
case BTC_SET_ACT_CTRL_BT_COEX:
break;
+ case BTC_SET_ACT_CTRL_8723B_ANT:
+ break;
default:
break;
}
- return true;
-}
-
-static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist)
-{
-}
-
-static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist)
-{
-}
-
-static void halbtc_display_bt_fw_info(struct btc_coexist *btcoexist)
-{
-}
-
-static void halbtc_display_fw_pwr_mode_cmd(struct btc_coexist *btcoexist)
-{
+ return ret;
}
/************************************************************
@@ -574,6 +696,35 @@ static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
rtl_write_dword(rtlpriv, reg_addr, data);
}
+void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr, u8 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ if (btcoexist->chip_interface == BTC_INTF_SDIO)
+ ; /* SDIO: intentionally no local register write */
+ else if (btcoexist->chip_interface == BTC_INTF_PCI)
+ rtl_write_byte(rtlpriv, reg_addr, data);
+ else if (btcoexist->chip_interface == BTC_INTF_USB)
+ rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+void halbtc_set_macreg(void *btc_context, u32 reg_addr, u32 bit_mask, u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
+}
+
+u32 halbtc_get_macreg(void *btc_context, u32 reg_addr, u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
+}
+
static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask,
u32 data)
{
@@ -619,50 +770,64 @@ static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
cmd_len, cmd_buf);
}
-static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type)
+void halbtc_set_bt_reg(void *btc_context, u8 reg_type, u32 offset, u32 set_val)
{
- struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
- switch (disp_type) {
- case BTC_DBG_DISP_COEX_STATISTICS:
- halbtc_display_coex_statistics(btcoexist);
- break;
- case BTC_DBG_DISP_BT_LINK_INFO:
- halbtc_display_bt_link_info(btcoexist);
- break;
- case BTC_DBG_DISP_BT_FW_VER:
- halbtc_display_bt_fw_info(btcoexist);
- break;
- case BTC_DBG_DISP_FW_PWR_MODE_CMD:
- halbtc_display_fw_pwr_mode_cmd(btcoexist);
- break;
- default:
- break;
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 cmd_buffer1[4] = {0};
+ u8 cmd_buffer2[4] = {0};
+ u8 *addr_to_set = (u8 *)&offset;
+ u8 *value_to_set = (u8 *)&set_val;
+ u8 oper_ver = 0;
+ u8 req_num = 0;
+
+ if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ cmd_buffer1[0] |= (oper_ver & 0x0f); /* Set OperVer */
+ cmd_buffer1[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
+ cmd_buffer1[1] = 0x0d; /* OpCode: BT_LO_OP_WRITE_REG_VALUE */
+ cmd_buffer1[2] = value_to_set[0]; /* Set WriteRegValue */
+ rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
+ &cmd_buffer1[0]);
+
+ msleep(200);
+ req_num++;
+
+ cmd_buffer2[0] |= (oper_ver & 0x0f); /* Set OperVer */
+ cmd_buffer2[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
+ cmd_buffer2[1] = 0x0c; /* OpCode: BT_LO_OP_WRITE_REG_ADDR */
+ cmd_buffer2[3] = addr_to_set[0]; /* Set WriteRegAddr */
+ rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
+ &cmd_buffer2[0]);
}
}
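/*
 * Editor's sketch, not part of this patch: byte 0 of each H2C buffer
 * above packs OperVer into the low nibble and ReqNum into the high
 * nibble; a helper (hypothetical name) makes that layout explicit.
 */
static inline u8 btc_h2c_byte0_sketch(u8 oper_ver, u8 req_num)
{
	return (oper_ver & 0x0f) | ((req_num << 4) & 0xf0);
}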
+bool halbtc_under_ips(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ enum rf_pwrstate rtstate;
+
+ if (ppsc->inactiveps) {
+ rtstate = ppsc->rfpwr_state;
+
+ if (rtstate != ERFON &&
+ ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
/*****************************************************************
* Extern functions called by other module
*****************************************************************/
-bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
+bool exhalbtc_initlize_variables(void)
{
struct btc_coexist *btcoexist = &gl_bt_coexist;
- btcoexist->statistics.cnt_bind++;
-
halbtc_dbg_init();
- if (btcoexist->binded)
- return false;
- else
- btcoexist->binded = true;
-
- btcoexist->chip_interface = BTC_INTF_UNKNOWN;
-
- if (NULL == btcoexist->adapter)
- btcoexist->adapter = adapter;
-
- btcoexist->stack_info.profile_notified = false;
-
btcoexist->btc_read_1byte = halbtc_read_1byte;
btcoexist->btc_write_1byte = halbtc_write_1byte;
btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte;
@@ -670,6 +835,7 @@ bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
btcoexist->btc_write_2byte = halbtc_write_2byte;
btcoexist->btc_read_4byte = halbtc_read_4byte;
btcoexist->btc_write_4byte = halbtc_write_4byte;
+ btcoexist->btc_write_local_reg_1byte = halbtc_write_local_reg_1byte;
btcoexist->btc_set_bb_reg = halbtc_set_bbreg;
btcoexist->btc_get_bb_reg = halbtc_get_bbreg;
@@ -678,10 +844,11 @@ bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
- btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
btcoexist->btc_get = halbtc_get;
btcoexist->btc_set = halbtc_set;
+ btcoexist->btc_set_bt_reg = halbtc_set_bt_reg;
+
btcoexist->bt_info.bt_ctrl_buf_size = false;
btcoexist->bt_info.agg_buf_size = 5;
@@ -690,40 +857,148 @@ bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
return true;
}
-void exhalbtc_init_hw_config(struct btc_coexist *btcoexist)
+bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+ struct rtl_priv *rtlpriv = adapter;
+ u8 ant_num = 2, chip_type, single_ant_path = 0;
+
+ if (btcoexist->binded)
+ return false;
+
+ switch (rtlpriv->rtlhal.interface) {
+ case INTF_PCI:
+ btcoexist->chip_interface = BTC_INTF_PCI;
+ break;
+ case INTF_USB:
+ btcoexist->chip_interface = BTC_INTF_USB;
+ break;
+ default:
+ btcoexist->chip_interface = BTC_INTF_UNKNOWN;
+ break;
+ }
+
+ btcoexist->binded = true;
+ btcoexist->statistics.cnt_bind++;
+
+ btcoexist->adapter = adapter;
+
+ btcoexist->stack_info.profile_notified = false;
+
+ btcoexist->bt_info.bt_ctrl_agg_buf_size = false;
+ btcoexist->bt_info.agg_buf_size = 5;
+
+ btcoexist->bt_info.increase_scan_dev_num = false;
+ btcoexist->bt_info.miracast_plus_bt = false;
+
+ chip_type = rtl_get_hwpg_bt_type(rtlpriv);
+ exhalbtc_set_chip_type(chip_type);
+ ant_num = rtl_get_hwpg_ant_num(rtlpriv);
+ exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
+ /* set default antenna position to main port */
+ btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
+
+ single_ant_path = rtl_get_hwpg_single_ant_path(rtlpriv);
+ exhalbtc_set_single_ant_path(single_ant_path);
+
+ if (rtl_get_hwpg_package_type(rtlpriv) == 0)
+ btcoexist->board_info.tfbga_package = false;
+ else if (rtl_get_hwpg_package_type(rtlpriv) == 1)
+ btcoexist->board_info.tfbga_package = false;
+ else
+ btcoexist->board_info.tfbga_package = true;
+
+ if (btcoexist->board_info.tfbga_package)
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Package Type = TFBGA\n");
+ else
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Package Type = Non-TFBGA\n");
+
+ return true;
+}
+
+void exhalbtc_power_on_setting(struct btc_coexist *btcoexist)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->statistics.cnt_power_on++;
+
+ if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_power_on_setting(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_power_on_setting(btcoexist);
+ }
+}
+
+void exhalbtc_pre_load_firmware(struct btc_coexist *btcoexist)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->statistics.cnt_pre_load_firmware++;
+
+ if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_pre_load_firmware(btcoexist);
+ }
+}
+
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only)
+{
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
btcoexist->statistics.cnt_init_hw_config++;
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_init_hwconfig(btcoexist);
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_init_hwconfig(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_init_hwconfig(btcoexist, wifi_only);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_init_hwconfig(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_init_hwconfig(btcoexist, wifi_only);
+ } else if (IS_HARDWARE_TYPE_8723A(btcoexist->adapter)) {
+ /* 8723A does not implement this */
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_init_hwconfig(btcoexist);
+ }
}
void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
btcoexist->statistics.cnt_init_coex_dm++;
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_init_coex_dm(btcoexist);
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_init_coex_dm(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_init_coex_dm(btcoexist);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_init_coex_dm(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_init_coex_dm(btcoexist);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_init_coex_dm(btcoexist);
+ }
btcoexist->initilized = true;
}
void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 ips_type;
if (!halbtc_is_bt_coexist_available(btcoexist))
@@ -737,18 +1012,28 @@ void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
else
ips_type = BTC_IPS_LEAVE;
- halbtc_leave_low_power();
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_ips_notify(btcoexist, ips_type);
+ halbtc_leave_low_power(btcoexist);
+
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_ips_notify(btcoexist, ips_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_ips_notify(btcoexist, ips_type);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_ips_notify(btcoexist, ips_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_ips_notify(btcoexist, ips_type);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_ips_notify(btcoexist, ips_type);
+ }
- halbtc_nomal_low_power();
+ halbtc_normal_low_power(btcoexist);
}
void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 lps_type;
if (!halbtc_is_bt_coexist_available(btcoexist))
@@ -762,14 +1047,24 @@ void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type)
else
lps_type = BTC_LPS_ENABLE;
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_lps_notify(btcoexist, lps_type);
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_lps_notify(btcoexist, lps_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_lps_notify(btcoexist, lps_type);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_lps_notify(btcoexist, lps_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_lps_notify(btcoexist, lps_type);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_lps_notify(btcoexist, lps_type);
+ }
}
void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 scan_type;
if (!halbtc_is_bt_coexist_available(btcoexist))
@@ -783,18 +1078,28 @@ void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
else
scan_type = BTC_SCAN_FINISH;
- halbtc_leave_low_power();
-
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_scan_notify(btcoexist, scan_type);
+ halbtc_leave_low_power(btcoexist);
+
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_scan_notify(btcoexist, scan_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_scan_notify(btcoexist, scan_type);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_scan_notify(btcoexist, scan_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_scan_notify(btcoexist, scan_type);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_scan_notify(btcoexist, scan_type);
+ }
- halbtc_nomal_low_power();
+ halbtc_normal_low_power(btcoexist);
}
void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 asso_type;
if (!halbtc_is_bt_coexist_available(btcoexist))
@@ -808,10 +1113,24 @@ void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
else
asso_type = BTC_ASSOCIATE_FINISH;
- halbtc_leave_low_power();
+ halbtc_leave_low_power(btcoexist);
+
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_connect_notify(btcoexist, asso_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_connect_notify(btcoexist, asso_type);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_connect_notify(btcoexist, asso_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_connect_notify(btcoexist, asso_type);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_connect_notify(btcoexist, asso_type);
+ }
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_connect_notify(btcoexist, asso_type);
+ halbtc_normal_low_power(btcoexist);
}
void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
@@ -830,15 +1149,28 @@ void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
else
status = BTC_MEDIA_DISCONNECT;
- halbtc_leave_low_power();
+ halbtc_leave_low_power(btcoexist);
+
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_media_status_notify(btcoexist, status);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_media_status_notify(btcoexist, status);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_media_status_notify(btcoexist, status);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_media_status_notify(btcoexist, status);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_media_status_notify(btcoexist, status);
+ }
- halbtc_nomal_low_power();
+ halbtc_normal_low_power(btcoexist);
}
void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 packet_type;
if (!halbtc_is_bt_coexist_available(btcoexist))
@@ -847,28 +1179,85 @@ void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type)
if (btcoexist->manual_control)
return;
- packet_type = BTC_PACKET_DHCP;
-
- halbtc_leave_low_power();
+ if (pkt_type == PACKET_DHCP) {
+ packet_type = BTC_PACKET_DHCP;
+ } else if (pkt_type == PACKET_EAPOL) {
+ packet_type = BTC_PACKET_EAPOL;
+ } else if (pkt_type == PACKET_ARP) {
+ packet_type = BTC_PACKET_ARP;
+ } else {
+ /* unknown special packet: nothing to notify */
+ return;
+ }
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_special_packet_notify(btcoexist,
- packet_type);
+ halbtc_leave_low_power(btcoexist);
+
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_special_packet_notify(btcoexist,
+ packet_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_special_packet_notify(btcoexist,
+ packet_type);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_special_packet_notify(btcoexist,
+ packet_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_special_packet_notify(btcoexist,
+ packet_type);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_special_packet_notify(btcoexist,
+ packet_type);
+ }
- halbtc_nomal_low_power();
+ halbtc_normal_low_power(btcoexist);
}
void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist,
u8 *tmp_buf, u8 length)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
btcoexist->statistics.cnt_bt_info_notify++;
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length);
+ halbtc_leave_low_power(btcoexist);
+
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_bt_info_notify(btcoexist, tmp_buf,
+ length);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_bt_info_notify(btcoexist, tmp_buf,
+ length);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_bt_info_notify(btcoexist, tmp_buf,
+ length);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_bt_info_notify(btcoexist, tmp_buf,
+ length);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_bt_info_notify(btcoexist, tmp_buf,
+ length);
+ }
+
+ halbtc_normal_low_power(btcoexist);
+}
+
+void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_rf_status_notify(btcoexist, type);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ }
}
void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
@@ -881,44 +1270,117 @@ void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
if (btcoexist->manual_control)
return;
- stack_op_type = BTC_STACK_OP_NONE;
-
- halbtc_leave_low_power();
-
- halbtc_nomal_low_power();
+ if ((type == HCI_BT_OP_INQUIRY_START) ||
+ (type == HCI_BT_OP_PAGING_START) ||
+ (type == HCI_BT_OP_PAIRING_START)) {
+ stack_op_type = BTC_STACK_OP_INQ_PAGE_PAIR_START;
+ } else if ((type == HCI_BT_OP_INQUIRY_FINISH) ||
+ (type == HCI_BT_OP_PAGING_SUCCESS) ||
+ (type == HCI_BT_OP_PAGING_UNSUCCESS) ||
+ (type == HCI_BT_OP_PAIRING_FINISH)) {
+ stack_op_type = BTC_STACK_OP_INQ_PAGE_PAIR_FINISH;
+ } else {
+ stack_op_type = BTC_STACK_OP_NONE;
+ }
}
void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_halt_notify(btcoexist);
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_halt_notify(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_halt_notify(btcoexist);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_halt_notify(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_halt_notify(btcoexist);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_halt_notify(btcoexist);
+ }
+
+ btcoexist->binded = false;
}
void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
+
+ /* Currently only the 1-antenna case needs this notification:
+ * once PNP reports the sleep state, we must leave LPS so that
+ * the device can sleep normally.
+ */
+
+ if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_pnp_notify(btcoexist, pnp_state);
+ else if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_pnp_notify(btcoexist, pnp_state);
+ } else if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_pnp_notify(btcoexist, pnp_state);
+ else if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_pnp_notify(btcoexist, pnp_state);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ }
}
-void exhalbtc_periodical(struct btc_coexist *btcoexist)
+void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
- btcoexist->statistics.cnt_periodical++;
+ btcoexist->statistics.cnt_coex_dm_switch++;
+
+ halbtc_leave_low_power(btcoexist);
+
+ if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 1) {
+ btcoexist->stop_coex_dm = true;
+ ex_btc8723b1ant_coex_dm_reset(btcoexist);
+ exhalbtc_set_ant_num(rtlpriv,
+ BT_COEX_ANT_TYPE_DETECTED, 2);
+ ex_btc8723b2ant_init_hwconfig(btcoexist);
+ ex_btc8723b2ant_init_coex_dm(btcoexist);
+ btcoexist->stop_coex_dm = false;
+ }
+ }
- halbtc_leave_low_power();
+ halbtc_normal_low_power(btcoexist);
+}
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_periodical(btcoexist);
+void exhalbtc_periodical(struct btc_coexist *btcoexist)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_periodical++;
+
+ halbtc_leave_low_power(btcoexist);
+
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_periodical(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ if (!halbtc_under_ips(btcoexist))
+ ex_btc8821a1ant_periodical(btcoexist);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_periodical(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_periodical(btcoexist);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_periodical(btcoexist);
+ }
- halbtc_nomal_low_power();
+ halbtc_normal_low_power(btcoexist);
}
void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
@@ -927,6 +1389,17 @@ void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
btcoexist->statistics.cnt_dbg_ctrl++;
+
+ halbtc_leave_low_power(btcoexist);
+
+ halbtc_normal_low_power(btcoexist);
+}
+
+void exhalbtc_antenna_detection(struct btc_coexist *btcoexist, u32 cent_freq,
+ u32 offset, u32 span, u32 seconds)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
}
void exhalbtc_stack_update_profile_info(void)
@@ -964,11 +1437,6 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
btcoexist->bt_info.bt_hci_ver = bt_hci_version;
}
-void exhalbtc_set_bt_exist(bool bt_exist)
-{
- gl_bt_coexist.board_info.bt_exist = bt_exist;
-}
-
void exhalbtc_set_chip_type(u8 chip_type)
{
switch (chip_type) {
@@ -1002,25 +1470,8 @@ void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
if (BT_COEX_ANT_TYPE_PG == type) {
gl_bt_coexist.board_info.pg_ant_num = ant_num;
gl_bt_coexist.board_info.btdm_ant_num = ant_num;
- /* The antenna position:
- * Main (default) or Aux for pgAntNum=2 && btdmAntNum =1.
- * The antenna position should be determined by
- * auto-detect mechanism.
- * The following is assumed to main,
- * and those must be modified
- * if y auto-detect mechanism is ready
- */
- if ((gl_bt_coexist.board_info.pg_ant_num == 2) &&
- (gl_bt_coexist.board_info.btdm_ant_num == 1))
- gl_bt_coexist.board_info.btdm_ant_pos =
- BTC_ANTENNA_AT_MAIN_PORT;
- else
- gl_bt_coexist.board_info.btdm_ant_pos =
- BTC_ANTENNA_AT_MAIN_PORT;
} else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
gl_bt_coexist.board_info.btdm_ant_num = ant_num;
- gl_bt_coexist.board_info.btdm_ant_pos =
- BTC_ANTENNA_AT_MAIN_PORT;
} else if (type == BT_COEX_ANT_TYPE_DETECTED) {
gl_bt_coexist.board_info.btdm_ant_num = ant_num;
if (rtlpriv->cfg->mod_params->ant_sel == 1)
@@ -1032,13 +1483,33 @@ void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
}
}
+/* Currently used by 8723b only, S0 or S1 */
+void exhalbtc_set_single_ant_path(u8 single_ant_path)
+{
+ gl_bt_coexist.board_info.single_ant_path = single_ant_path;
+}
+
void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
- ex_btc8723b2ant_display_coex_info(btcoexist);
+ halbtc_leave_low_power(btcoexist);
+
+ if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8821a2ant_display_coex_info(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8821a1ant_display_coex_info(btcoexist);
+ } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8723b2ant_display_coex_info(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_btc8723b1ant_display_coex_info(btcoexist);
+ } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_btc8192e2ant_display_coex_info(btcoexist);
+ }
+
+ halbtc_normal_low_power(btcoexist);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index c5c360e011a9..f9b87c12db09 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -153,6 +153,7 @@ struct btc_board_info {
u8 btdm_ant_pos;
u8 single_ant_path; /* current used for 8723b only, 1=>s0, 0=>s1 */
bool bt_exist;
+ bool tfbga_package;
};
enum btc_dbg_opcode {
@@ -217,6 +218,26 @@ enum btc_iot_peer {
BTC_IOT_PEER_MAX,
};
+/* for 8723b-d cut large current issue */
+enum bt_wifi_coex_state {
+ BTC_WIFI_STAT_INIT,
+ BTC_WIFI_STAT_IQK,
+ BTC_WIFI_STAT_NORMAL_OFF,
+ BTC_WIFI_STAT_MP_OFF,
+ BTC_WIFI_STAT_NORMAL,
+ BTC_WIFI_STAT_ANT_DIV,
+ BTC_WIFI_STAT_MAX
+};
+
+enum bt_ant_type {
+ BTC_ANT_TYPE_0,
+ BTC_ANT_TYPE_1,
+ BTC_ANT_TYPE_2,
+ BTC_ANT_TYPE_3,
+ BTC_ANT_TYPE_4,
+ BTC_ANT_TYPE_MAX
+};
+
enum btc_get_type {
/* type bool */
BTC_GET_BL_HS_OPERATION,
@@ -237,6 +258,9 @@ enum btc_get_type {
BTC_GET_BL_WIFI_UNDER_B_MODE,
BTC_GET_BL_EXT_SWITCH,
BTC_GET_BL_WIFI_IS_IN_MP_MODE,
+ BTC_GET_BL_IS_ASUS_8723B,
+ BTC_GET_BL_FW_READY,
+ BTC_GET_BL_RF4CE_CONNECTED,
/* type s4Byte */
BTC_GET_S4_WIFI_RSSI,
@@ -249,6 +273,11 @@ enum btc_get_type {
BTC_GET_U4_WIFI_LINK_STATUS,
BTC_GET_U4_BT_PATCH_VER,
BTC_GET_U4_VENDOR,
+ BTC_GET_U4_SUPPORTED_VERSION,
+ BTC_GET_U4_SUPPORTED_FEATURE,
+ BTC_GET_U4_WIFI_IQK_TOTAL,
+ BTC_GET_U4_WIFI_IQK_OK,
+ BTC_GET_U4_WIFI_IQK_FAIL,
/* type u1Byte */
BTC_GET_U1_WIFI_DOT11_CHNL,
@@ -256,6 +285,7 @@ enum btc_get_type {
BTC_GET_U1_WIFI_HS_CHNL,
BTC_GET_U1_MAC_PHY_MODE,
BTC_GET_U1_AP_NUM,
+ BTC_GET_U1_ANT_TYPE,
BTC_GET_U1_IOT_PEER,
/* for 1Ant */
@@ -315,6 +345,7 @@ enum btc_set_type {
/* BT Coex related */
BTC_SET_ACT_CTRL_BT_INFO,
BTC_SET_ACT_CTRL_BT_COEX,
+ BTC_SET_ACT_CTRL_8723B_ANT,
/***************************/
BTC_SET_MAX
};
@@ -433,12 +464,18 @@ struct btc_bt_info {
bool bt_disabled;
u8 rssi_adjust_for_agc_table_on;
u8 rssi_adjust_for_1ant_coex_type;
+ bool pre_bt_ctrl_agg_buf_size;
bool bt_busy;
+ u8 pre_agg_buf_size;
u8 agg_buf_size;
bool limited_dig;
+ bool pre_reject_agg_pkt;
bool reject_agg_pkt;
bool bt_ctrl_buf_size;
bool increase_scan_dev_num;
+ bool miracast_plus_bt;
+ bool bt_ctrl_agg_buf_size;
+ bool bt_tx_rx_mask;
u16 bt_hci_ver;
u16 bt_real_fw_ver;
u8 bt_fw_ver;
@@ -486,6 +523,8 @@ struct btc_statistics {
u32 cnt_coex_dm_switch;
u32 cnt_stack_operation_notify;
u32 cnt_dbg_ctrl;
+ u32 cnt_pre_load_firmware;
+ u32 cnt_power_on;
};
struct btc_bt_link_info {
@@ -560,8 +599,9 @@ bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
extern struct btc_coexist gl_bt_coexist;
-bool exhalbtc_initlize_variables(struct rtl_priv *adapter);
-void exhalbtc_init_hw_config(struct btc_coexist *btcoexist);
+bool exhalbtc_initlize_variables(void);
+bool exhalbtc_bind_bt_coex_withadapter(void *adapter);
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only);
void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
@@ -591,5 +631,6 @@ void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
u8 *rssi_wifi, u8 *rssi_bt);
void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
+void exhalbtc_set_single_ant_path(u8 single_ant_path);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index 46e0fa6be273..4366c9817e1e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -45,42 +45,79 @@ static struct rtl_btc_ops rtl_btc_operation = {
.btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
.btc_is_bt_disabled = rtl_btc_is_bt_disabled,
.btc_special_packet_notify = rtl_btc_special_packet_notify,
+ .btc_record_pwr_mode = rtl_btc_record_pwr_mode,
+ .btc_get_lps_val = rtl_btc_get_lps_val,
+ .btc_get_rpwm_val = rtl_btc_get_rpwm_val,
+ .btc_is_bt_ctrl_lps = rtl_btc_is_bt_ctrl_lps,
+ .btc_is_bt_lps_on = rtl_btc_is_bt_lps_on,
+ .btc_get_ampdu_cfg = rtl_btc_get_ampdu_cfg,
};
-void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
+void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len)
{
- exhalbtc_initlize_variables(rtlpriv);
+ u8 safe_len;
+
+ safe_len = sizeof(gl_bt_coexist.pwr_mode_val);
+
+ if (safe_len > len)
+ safe_len = len;
+
+ memcpy(gl_bt_coexist.pwr_mode_val, buf, safe_len);
}
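/*
 * Editor's note, not part of this patch: the explicit clamp above is
 * equivalent to the kernel's min_t() helper; a sketch of the same
 * bounded copy (function name is illustrative):
 */
static void record_pwr_mode_sketch(u8 *dst, size_t dst_size,
				   const u8 *buf, u8 len)
{
	memcpy(dst, buf, min_t(size_t, dst_size, len));
}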
-void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
+u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv)
{
- u8 ant_num;
- u8 bt_exist;
- u8 bt_type;
+ return gl_bt_coexist.bt_info.lps_val;
+}
- ant_num = rtl_get_hwpg_ant_num(rtlpriv);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "%s, antNum is %d\n", __func__, ant_num);
+u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv)
+{
+ return gl_bt_coexist.bt_info.rpwm_val;
+}
- bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "%s, bt_exist is %d\n", __func__, bt_exist);
- exhalbtc_set_bt_exist(bt_exist);
+bool rtl_btc_is_bt_ctrl_lps(struct rtl_priv *rtlpriv)
+{
+ return gl_bt_coexist.bt_info.bt_ctrl_lps;
+}
+
+bool rtl_btc_is_bt_lps_on(struct rtl_priv *rtlpriv)
+{
+ return gl_bt_coexist.bt_info.bt_lps_on;
+}
- bt_type = rtl_get_hwpg_bt_type(rtlpriv);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s, bt_type is %d\n",
- __func__, bt_type);
- exhalbtc_set_chip_type(bt_type);
+void rtl_btc_get_ampdu_cfg(struct rtl_priv *rtlpriv, u8 *reject_agg,
+ u8 *ctrl_agg_size, u8 *agg_size)
+{
+ if (reject_agg)
+ *reject_agg = gl_bt_coexist.bt_info.reject_agg_pkt;
+ if (ctrl_agg_size)
+ *ctrl_agg_size = gl_bt_coexist.bt_info.bt_ctrl_agg_buf_size;
+ if (agg_size)
+ *agg_size = gl_bt_coexist.bt_info.agg_buf_size;
+}
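/*
 * Editor's usage sketch, not part of this patch: every out-pointer of
 * rtl_btc_get_ampdu_cfg() may be NULL, so a (hypothetical) caller can
 * fetch only the fields it needs.
 */
static void apply_ampdu_cfg_sketch(struct rtl_priv *rtlpriv)
{
	u8 reject_agg = 0, agg_size = 0;

	rtl_btc_get_ampdu_cfg(rtlpriv, &reject_agg, NULL, &agg_size);
	/* ... apply reject_agg / agg_size to the RX AMPDU setup ... */
}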
- if (rtlpriv->cfg->mod_params->ant_sel == 1)
- exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_DETECTED, 1);
- else
- exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
+{
+ exhalbtc_initlize_variables();
+ exhalbtc_bind_bt_coex_withadapter(rtlpriv);
+}
+
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
+{
+ /* ant_num, bt_type and single_ant_path setup moved to
+ * exhalbtc_bind_bt_coex_withadapter()
+ */
}
void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
{
- exhalbtc_init_hw_config(&gl_bt_coexist);
+ u8 bt_exist;
+
+ bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ "%s, bt_exist is %d\n", __func__, bt_exist);
+
+ exhalbtc_init_hw_config(&gl_bt_coexist, !bt_exist);
exhalbtc_init_coex_dm(&gl_bt_coexist);
}
@@ -118,7 +155,9 @@ void rtl_btc_periodical(struct rtl_priv *rtlpriv)
void rtl_btc_halt_notify(void)
{
- exhalbtc_halt_notify(&gl_bt_coexist);
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ exhalbtc_halt_notify(btcoexist);
}
void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
index fff5117e1c4e..6fe521cbe7f0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
@@ -43,6 +43,13 @@ bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type);
+void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
+u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv);
+u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_bt_ctrl_lps(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_bt_lps_on(struct rtl_priv *rtlpriv);
+void rtl_btc_get_ampdu_cfg(struct rtl_priv *rtlpriv, u8 *reject_agg,
+ u8 *ctrl_agg_size, u8 *agg_size);
struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index a4f8e326a2bc..b0ad061048c5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -629,7 +629,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
}
/*For LPS */
- if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if ((changed & IEEE80211_CONF_CHANGE_PS) &&
+ rtlpriv->psc.swctrl_lps && !rtlpriv->psc.fwctrl_lps) {
cancel_delayed_work(&rtlpriv->works.ps_work);
cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
if (conf->flags & IEEE80211_CONF_PS) {
@@ -1463,6 +1464,9 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
mac->act_scanning = false;
mac->skip_scan = false;
+
+ rtlpriv->btcoexist.btc_info.ap_num = rtlpriv->scan_list.num;
+
if (rtlpriv->link_info.higher_busytraffic)
return;
@@ -1670,6 +1674,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
*so don't use rtl_cam_reset_all_entry
*or clear all entry here.
*/
+ rtl_wait_tx_report_acked(hw, 500); /* wait 500ms for TX ack */
+
rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
break;
default:
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 7ecac6116d5d..38fef6dbb44b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -27,7 +27,7 @@
#include <linux/moduleparam.h>
#ifdef CONFIG_RTLWIFI_DEBUG
-void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
+void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index bf5339f1c1bc..947718001457 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -105,6 +105,7 @@
#define COMP_EASY_CONCURRENT COMP_USB /* reuse of this bit is OK */
#define COMP_BT_COEXIST BIT(30)
#define COMP_IQK BIT(31)
+#define COMP_TX_REPORT BIT_ULL(32)
/*--------------------------------------------------------------
Define the rt_print components
@@ -169,7 +170,7 @@ enum dbgp_flag_e {
struct rtl_priv;
__printf(4, 5)
-void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
+void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...);
__printf(4, 5)
@@ -198,7 +199,7 @@ struct rtl_priv;
__printf(4, 5)
static inline void RT_TRACE(struct rtl_priv *rtlpriv,
- int comp, int level,
+ u64 comp, int level,
const char *fmt, ...)
{
}
@@ -211,7 +212,7 @@ static inline void RTPRINT(struct rtl_priv *rtlpriv,
}
static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv,
- int comp, int level,
+ u64 comp, int level,
const char *titlestring,
const void *hexdata, size_t hexdatalen)
{
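
The widening of comp from int to u64 across debug.c/debug.h pairs with the new COMP_TX_REPORT = BIT_ULL(32): shifting 1 left by 32 does not fit in a 32-bit int (the shift is undefined behavior), and the mask test against mod_params->debug_mask would otherwise drop every component above bit 31. A small sketch of the distinction, using plain shifts where the kernel uses BIT()/BIT_ULL():

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* 1 << 32 on a 32-bit int is undefined; 1ULL << 32 is fine. */
            uint64_t comp_tx_report = 1ULL << 32;  /* mirrors BIT_ULL(32) */
            uint64_t debug_mask = ~0ULL;           /* everything enabled */

            if (comp_tx_report & debug_mask)
                    printf("bit 32 survives a 64-bit mask test\n");
            return 0;
    }
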
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 0c1f8307e179..032b6317690d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -729,13 +729,12 @@ static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
dev_kfree_skb_any(skb);
} else {
struct sk_buff *uskb = NULL;
- u8 *pdata;
uskb = dev_alloc_skb(skb->len + 128);
if (likely(uskb)) {
memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
sizeof(rx_status));
- pdata = skb_put_data(uskb, skb->data, skb->len);
+ skb_put_data(uskb, skb->data, skb->len);
dev_kfree_skb_any(skb);
ieee80211_rx_irqsafe(hw, uskb);
} else {
@@ -880,6 +879,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
if (unicast)
rtlpriv->link_info.num_rx_inperiod++;
}
+
+ rtl_collect_scan_list(hw, skb);
+
/* static bcn for roaming */
rtl_beacon_statistic(hw, skb);
rtl_p2p_info(hw, (void *)skb->data, skb->len);
@@ -1825,6 +1827,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
rtlpci->driver_is_goingto_unload = false;
if (rtlpriv->cfg->ops->get_btc_status &&
rtlpriv->cfg->ops->get_btc_status()) {
+ rtlpriv->btcoexist.btc_info.ap_num = 36;
rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 0d152877d969..07ee3096f50e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -356,7 +356,7 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
if (mac->link_state != MAC80211_LINKED)
return;
- if (ppsc->dot11_psmode == rt_psmode)
+ if (ppsc->dot11_psmode == rt_psmode && rt_psmode == EACTIVE)
return;
/* Update power save mode configured. */
@@ -438,11 +438,13 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
- if (ppsc->dot11_psmode == EACTIVE) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Enter 802.11 power save mode...\n");
- rtl_lps_set_psmode(hw, EAUTOPS);
- }
+ /* No need to check (ppsc->dot11_psmode == EACTIVE) here, because
+ * bt_coexist may ask to enter LPS.
+ * In the normal case, this constraint moved to rtl_lps_set_psmode().
+ */
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Enter 802.11 power save mode...\n");
+ rtl_lps_set_psmode(hw, EAUTOPS);
spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
index 15400ee6c04b..0c0d64aea651 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
@@ -248,7 +248,6 @@
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
-#define REG_USTIME_TSF 0x0551
#define REG_MBID_NUM 0x0552
#define REG_DUAL_TSF_RST 0x0553
#define REG_BCN_INTERVAL 0x0554
@@ -256,6 +255,7 @@
#define REG_DRVERLYINT 0x0558
#define REG_BCNDMATIM 0x0559
#define REG_ATIMWND 0x055A
+#define REG_USTIME_TSF 0x055C
#define REG_BCN_MAX_ERR 0x055D
#define REG_RXTSF_OFFSET_CCK 0x055E
#define REG_RXTSF_OFFSET_OFDM 0x055F
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 7661cfa53032..774e72058d24 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -270,7 +270,7 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
static struct rtl_mod_params rtl88ee_mod_params = {
.sw_crypto = false,
- .inactiveps = false,
+ .inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = false,
.msi_support = true,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index c7a77467b20e..015476e3f7e5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -647,8 +647,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
if (cmd_send_packet)
rtstatus = cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
index 1bb7ed35812d..9e3b58a5d2bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
@@ -227,7 +227,6 @@
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
-#define REG_USTIME_TSF 0x0551
#define REG_MBID_NUM 0x0552
#define REG_DUAL_TSF_RST 0x0553
#define REG_BCN_INTERVAL 0x0554
@@ -235,6 +234,7 @@
#define REG_DRVERLYINT 0x0558
#define REG_BCNDMATIM 0x0559
#define REG_ATIMWND 0x055A
+#define REG_USTIME_TSF 0x055C
#define REG_BCN_MAX_ERR 0x055D
#define REG_RXTSF_OFFSET_CCK 0x055E
#define REG_RXTSF_OFFSET_OFDM 0x055F
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 88faeab2574f..f4129cf96e7c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -668,8 +668,7 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
if (!skb) {
dlok = false;
} else {
- memcpy((u8 *) skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = _rtl92d_cmd_send_packet(hw, skb);
if (rtstatus)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
index b354b95936e2..d4c4e76a9244 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
@@ -255,7 +255,6 @@
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
-#define REG_USTIME_TSF 0x0551
#define REG_MBID_NUM 0x0552
#define REG_DUAL_TSF_RST 0x0553
#define REG_BCN_INTERVAL 0x0554
@@ -263,6 +262,7 @@
#define REG_DRVERLYINT 0x0558
#define REG_BCNDMATIM 0x0559
#define REG_ATIMWND 0x055A
+#define REG_USTIME_TSF 0x055C
#define REG_BCN_MAX_ERR 0x055D
#define REG_RXTSF_OFFSET_CCK 0x055E
#define REG_RXTSF_OFFSET_OFDM 0x055F
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 1f42ce5f8f27..f5d4df985c37 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -422,28 +422,86 @@ void rtl92ee_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 u1_h2c_set_pwrmode[H2C_92E_PWEMODE_LENGTH] = { 0 };
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 rlbm , power_state = 0;
+ u8 rlbm, power_state = 0, byte5 = 0;
+ u8 awake_intvl; /* DTIM = (awake_intvl - 1) */
+ struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
+ bool bt_ctrl_lps = (rtlpriv->cfg->ops->get_btc_status() ?
+ btc_ops->btc_is_bt_ctrl_lps(rtlpriv) : false);
+ bool bt_lps_on = (rtlpriv->cfg->ops->get_btc_status() ?
+ btc_ops->btc_is_bt_lps_on(rtlpriv) : false);
+
+ if (bt_ctrl_lps)
+ mode = (bt_lps_on ? FW_PS_MIN_MODE : FW_PS_ACTIVE_MODE);
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
+ mode, bt_ctrl_lps);
+
+ switch (mode) {
+ case FW_PS_MIN_MODE:
+ rlbm = 0;
+ awake_intvl = 2;
+ break;
+ case FW_PS_MAX_MODE:
+ rlbm = 1;
+ awake_intvl = 2;
+ break;
+ case FW_PS_DTIM_MODE:
+ rlbm = 2;
+ awake_intvl = ppsc->reg_max_lps_awakeintvl;
+ /* hw->conf.ps_dtim_period or mac->vif->bss_conf.dtim_period
+ * is only used in swlps.
+ */
+ break;
+ default:
+ rlbm = 2;
+ awake_intvl = 4;
+ break;
+ }
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD , "FW LPS mode = %d\n", mode);
+ if (rtlpriv->mac80211.p2p) {
+ awake_intvl = 2;
+ rlbm = 1;
+ }
+
+ if (mode == FW_PS_ACTIVE_MODE) {
+ byte5 = 0x40;
+ power_state = FW_PWR_STATE_ACTIVE;
+ } else {
+ if (bt_ctrl_lps) {
+ byte5 = btc_ops->btc_get_lps_val(rtlpriv);
+ power_state = btc_ops->btc_get_rpwm_val(rtlpriv);
+
+ if ((rlbm == 2) && (byte5 & BIT(4))) {
+ /* Keep the awake interval at 1 to
+ * avoid degrading coex performance
+ */
+ awake_intvl = 2;
+ rlbm = 2;
+ }
+ } else {
+ byte5 = 0x40;
+ power_state = FW_PWR_STATE_RF_OFF;
+ }
+ }
SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
- rlbm = 0;/*YJ,temp,120316. FW now not support RLBM=2.*/
SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
- (rtlpriv->mac80211.p2p) ?
- ppsc->smart_ps : 1);
+ bt_ctrl_lps ? 0 :
+ ((rtlpriv->mac80211.p2p) ?
+ ppsc->smart_ps : 1));
SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
- ppsc->reg_max_lps_awakeintvl);
+ awake_intvl);
SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
- if (mode == FW_PS_ACTIVE_MODE)
- power_state |= FW_PWR_STATE_ACTIVE;
- else
- power_state |= FW_PWR_STATE_RF_OFF;
SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+ SET_H2CCMD_PWRMODE_PARM_BYTE5(u1_h2c_set_pwrmode, byte5);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
u1_h2c_set_pwrmode, H2C_92E_PWEMODE_LENGTH);
+ if (rtlpriv->cfg->ops->get_btc_status())
+ btc_ops->btc_record_pwr_mode(rtlpriv, u1_h2c_set_pwrmode,
+ H2C_92E_PWEMODE_LENGTH);
rtl92ee_fill_h2c_cmd(hw, H2C_92E_SETPWRMODE, H2C_92E_PWEMODE_LENGTH,
u1_h2c_set_pwrmode);
}
@@ -708,8 +766,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
b_dlok = true;
@@ -843,6 +900,7 @@ void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
case C2H_8192E_TX_REPORT:
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE ,
"[C2H], C2H_8723BE_TX_REPORT!\n");
+ rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len);
break;
case C2H_8192E_BT_INFO:
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
index af8271967a88..b770f722daa6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
@@ -37,7 +37,7 @@
#define USE_OLD_WOWLAN_DEBUG_FW 0
#define H2C_92E_RSVDPAGE_LOC_LEN 5
-#define H2C_92E_PWEMODE_LENGTH 5
+#define H2C_92E_PWEMODE_LENGTH 7
#define H2C_92E_JOINBSSRPT_LENGTH 1
#define H2C_92E_AP_OFFLOAD_LENGTH 3
#define H2C_92E_WOWLAN_LENGTH 3
@@ -154,6 +154,8 @@ enum rtl8192e_c2h_evt {
SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __val)
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __val) \
SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__cmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__cmd) + 5, 0, 8, __val)
#define GET_92E_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
LE_BITS_TO_1BYTE(__cmd, 0, 8)
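
With H2C_92E_PWEMODE_LENGTH grown from 5 to 7, the SET_H2CCMD_PWRMODE_PARM_* macros fill one byte each: mode at offset 0, RLBM and smart-PS at offset 1, the awake interval at 2, UAPSD at 3, the power state at 4, and the new coex byte at 5. A rough standalone sketch of that packing (the 4/4 bit split in byte 1 is an assumption, not shown in this hunk):

    #include <stdint.h>
    #include <string.h>

    #define H2C_PWEMODE_LENGTH 7

    /* Sketch of the 7-byte SETPWRMODE payload. */
    static void pack_pwrmode(uint8_t *cmd, uint8_t mode, uint8_t rlbm,
                             uint8_t smart_ps, uint8_t awake_intvl,
                             uint8_t power_state, uint8_t byte5)
    {
            memset(cmd, 0, H2C_PWEMODE_LENGTH);
            cmd[0] = mode;                                /* PARM_MODE */
            cmd[1] = (rlbm & 0x0f) | (uint8_t)(smart_ps << 4); /* assumed split */
            cmd[2] = awake_intvl;                         /* AWAKE_INTERVAL */
            cmd[3] = 0;                                   /* ALL_QUEUE_UAPSD */
            cmd[4] = power_state;                         /* PWR_STATE */
            cmd[5] = byte5;                               /* new coex byte */
    }
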
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 11d97fa0e921..d84ac7adfd82 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -1670,7 +1670,8 @@ void rtl92ee_card_disable(struct ieee80211_hw *hw)
_rtl92ee_poweroff_adapter(hw);
/* after power off we should do iqk again */
- rtlpriv->phy.iqk_initialized = false;
+ if (!rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->phy.iqk_initialized = false;
}
void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index 48820bc497d8..eaa503b7c4b4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -253,7 +253,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = {
static struct rtl_mod_params rtl92ee_mod_params = {
.sw_crypto = false,
- .inactiveps = false,
+ .inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
.msi_support = true,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index b1864bb07c2c..55f238a2a310 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -731,6 +731,9 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
}
+ /* tx report */
+ rtl_get_tx_report(ptcb_desc, pdesc, hw);
+
SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
if (ieee80211_is_mgmt(fc)) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
index 8053d1b12ec4..b0105c529010 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
@@ -159,7 +159,7 @@
#define SET_TX_DESC_NULL_0(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 1, __val)
#define SET_TX_DESC_NULL_1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 15, 1, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 15, 1, __val)
#define SET_TX_DESC_BK(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
@@ -167,7 +167,7 @@
#define SET_TX_DESC_RAW(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
#define SET_TX_DESC_BT_NULL(__pdesc, __val) \
@@ -252,15 +252,15 @@
/* Dword 6 */
#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 12, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 3, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 16, 3, __val)
#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 19, 3, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 19, 3, __val)
#define SET_TX_DESC_ANTSEL_C(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 22, 3, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 22, 3, __val)
#define SET_TX_DESC_ANTSEL_D(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 25, 3, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 25, 3, __val)
/* Dword 7 */
#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
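
The trx.h changes above add parentheses around __pdesc before the dword offset. The old form is unsafe when the macro argument contains an operator of lower precedence than '+': the offset then binds to part of the expression instead of the whole pointer. A minimal sketch of the failure mode with a simplified macro (buffers are hypothetical):

    #include <stdio.h>

    #define OFFSET_UNSAFE(__pdesc)  (__pdesc + 24)    /* old style */
    #define OFFSET_SAFE(__pdesc)    ((__pdesc) + 24)  /* new style */

    int main(void)
    {
            char a[64] = {0}, b[64] = {0};
            int use_b = 1;

            /* Expands to (use_b ? b : a + 24): "+ 24" binds to 'a' only. */
            char *wrong = OFFSET_UNSAFE(use_b ? b : a);
            /* Expands to ((use_b ? b : a) + 24): offset applies either way. */
            char *right = OFFSET_SAFE(use_b ? b : a);

            printf("wrong=%p right=%p (right should be b+24)\n",
                   (void *)wrong, (void *)right);
            return 0;
    }
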
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index a954a87b0ed9..bf9859f74b6f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -470,8 +470,7 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
index 306059f9b9cc..30938cd9fce5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
@@ -217,7 +217,6 @@
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
-#define REG_USTIME_TSF 0x0551
#define REG_MBID_NUM 0x0552
#define REG_DUAL_TSF_RST 0x0553
#define REG_BCN_INTERVAL 0x0554
@@ -225,6 +224,7 @@
#define REG_DRVERLYINT 0x0558
#define REG_BCNDMATIM 0x0559
#define REG_ATIMWND 0x055A
+#define REG_USTIME_TSF 0x055C
#define REG_BCN_MAX_ERR 0x055D
#define REG_RXTSF_OFFSET_CCK 0x055E
#define REG_RXTSF_OFFSET_OFDM 0x055F
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index 4fc839b1d601..dd6f95cfaec9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -240,27 +240,84 @@ void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 u1_h2c_set_pwrmode[H2C_PWEMODE_LENGTH] = { 0 };
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 rlbm, power_state = 0;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+ u8 rlbm, power_state = 0, byte5 = 0;
+ u8 awake_intvl; /* DTIM = (awake_intvl - 1) */
+ struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
+ bool bt_ctrl_lps = (rtlpriv->cfg->ops->get_btc_status() ?
+ btc_ops->btc_is_bt_ctrl_lps(rtlpriv) : false);
+ bool bt_lps_on = (rtlpriv->cfg->ops->get_btc_status() ?
+ btc_ops->btc_is_bt_lps_on(rtlpriv) : false);
+
+ if (bt_ctrl_lps)
+ mode = (bt_lps_on ? FW_PS_MIN_MODE : FW_PS_ACTIVE_MODE);
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
+ mode, bt_ctrl_lps);
+
+ switch (mode) {
+ case FW_PS_MIN_MODE:
+ rlbm = 0;
+ awake_intvl = 2;
+ break;
+ case FW_PS_MAX_MODE:
+ rlbm = 1;
+ awake_intvl = 2;
+ break;
+ case FW_PS_DTIM_MODE:
+ rlbm = 2;
+ awake_intvl = ppsc->reg_max_lps_awakeintvl;
+ /* hw->conf.ps_dtim_period or mac->vif->bss_conf.dtim_period
+ * is only used in swlps.
+ */
+ break;
+ default:
+ rlbm = 2;
+ awake_intvl = 4;
+ break;
+ }
+
+ if (rtlpriv->mac80211.p2p) {
+ awake_intvl = 2;
+ rlbm = 1;
+ }
+
+ if (mode == FW_PS_ACTIVE_MODE) {
+ byte5 = 0x40;
+ power_state = FW_PWR_STATE_ACTIVE;
+ } else {
+ if (bt_ctrl_lps) {
+ byte5 = btc_ops->btc_get_lps_val(rtlpriv);
+ power_state = btc_ops->btc_get_rpwm_val(rtlpriv);
+
+ if ((rlbm == 2) && (byte5 & BIT(4))) {
+ /* Keep the awake interval at 1 to
+ * avoid degrading coex performance
+ */
+ awake_intvl = 2;
+ rlbm = 2;
+ }
+ } else {
+ byte5 = 0x40;
+ power_state = FW_PWR_STATE_RF_OFF;
+ }
+ }
SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
- rlbm = 0;/*YJ,temp,120316. FW now not support RLBM=2.*/
SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
- (rtlpriv->mac80211.p2p) ?
- ppsc->smart_ps : 1);
+ bt_ctrl_lps ? 0 : ppsc->smart_ps);
SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
- ppsc->reg_max_lps_awakeintvl);
+ awake_intvl);
SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
- if (mode == FW_PS_ACTIVE_MODE)
- power_state |= FW_PWR_STATE_ACTIVE;
- else
- power_state |= FW_PWR_STATE_RF_OFF;
SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+ SET_H2CCMD_PWRMODE_PARM_BYTE5(u1_h2c_set_pwrmode, byte5);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
u1_h2c_set_pwrmode, H2C_PWEMODE_LENGTH);
+ if (rtlpriv->cfg->ops->get_btc_status())
+ btc_ops->btc_record_pwr_mode(rtlpriv, u1_h2c_set_pwrmode,
+ H2C_PWEMODE_LENGTH);
rtl8723be_fill_h2c_cmd(hw, H2C_8723B_SETPWRMODE, H2C_PWEMODE_LENGTH,
u1_h2c_set_pwrmode);
}
@@ -527,8 +584,7 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
u1rsvdpageloc, sizeof(u1rsvdpageloc));
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -662,6 +718,7 @@ void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw,
case C2H_8723B_TX_REPORT:
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
"[C2H], C2H_8723BE_TX_REPORT!\n");
+ rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len);
break;
case C2H_8723B_BT_INFO:
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
index 2482b3bc2bfa..2de4edb62bca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
@@ -33,7 +33,7 @@
#define USE_OLD_WOWLAN_DEBUG_FW 0
-#define H2C_PWEMODE_LENGTH 5
+#define H2C_PWEMODE_LENGTH 7
/* Fw PS state for RPWM.
*BIT[2:0] = HW state
@@ -125,6 +125,8 @@ enum rtl8723b_c2h_evt {
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 5, 0, 8, __val)
#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd) \
LE_BITS_TO_1BYTE(__ph2ccmd, 0, 8)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index a79f936bb394..2a7ad5ffe997 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -846,6 +846,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
return false;
}
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
+
bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL);
rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));
@@ -1440,7 +1443,9 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw)
*/
if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2 ||
!rtlpriv->cfg->ops->get_btc_status()) {
- rtl8723be_phy_iq_calibrate(hw, false);
+ rtl8723be_phy_iq_calibrate(hw,
+ (rtlphy->iqk_initialized ?
+ true : false));
rtlphy->iqk_initialized = true;
}
rtl8723be_dm_check_txpower_tracking(hw);
@@ -1674,7 +1679,8 @@ void rtl8723be_card_disable(struct ieee80211_hw *hw)
_rtl8723be_poweroff_adapter(hw);
/* after power off we should do iqk again */
- rtlpriv->phy.iqk_initialized = false;
+ if (!rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->phy.iqk_initialized = false;
}
void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index ab0f39e46e1b..9752175cc466 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -2352,7 +2352,7 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
if (b_recovery) {
rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
rtlphy->iqk_bb_backup, 9);
- return;
+ goto label_done;
}
/* Save RF Path */
path_sel_bb = rtl_get_bbreg(hw, 0x948, MASKDWORD);
@@ -2460,6 +2460,7 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
rtl_set_bbreg(hw, 0x948, MASKDWORD, path_sel_bb);
/* rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, 0xfffff, path_sel_rf); */
+label_done:
spin_lock(&rtlpriv->locks.iqk_lock);
rtlphy->lck_inprogress = false;
spin_unlock(&rtlpriv->locks.iqk_lock);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
index 03581d2a5da0..95c4f8e206c7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
@@ -261,7 +261,6 @@
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
-#define REG_USTIME_TSF 0x0551
#define REG_MBID_NUM 0x0552
#define REG_DUAL_TSF_RST 0x0553
#define REG_BCN_INTERVAL 0x0554
@@ -269,6 +268,7 @@
#define REG_DRVERLYINT 0x0558
#define REG_BCNDMATIM 0x0559
#define REG_ATIMWND 0x055A
+#define REG_USTIME_TSF 0x055C
#define REG_BCN_MAX_ERR 0x055D
#define REG_RXTSF_OFFSET_CCK 0x055E
#define REG_RXTSF_OFFSET_OFDM 0x055F
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 8c0ac96b5430..f9d10f1e7cf8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -155,8 +155,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
- rtlpriv->psc.reg_fwctrl_lps = 3;
- rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+ rtlpriv->psc.reg_fwctrl_lps = 2;
+ rtlpriv->psc.reg_max_lps_awakeintvl = 2;
/* for ASPM, you can close aspm through
* set const_support_pciaspm = 0
*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 3c6ce994c6aa..0e8944119652 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -488,6 +488,9 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
}
+ /* tx report */
+ rtl_get_tx_report(ptcb_desc, pdesc, hw);
+
/* ptcb_desc->use_driver_rate = true; */
SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
index 8a9fe41ac15b..0274659f48ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
@@ -107,7 +107,7 @@
#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 14, 2, __val)
#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
@@ -115,7 +115,7 @@
#define SET_TX_DESC_RAW(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
#define SET_TX_DESC_BT_INT(__pdesc, __val) \
@@ -187,6 +187,18 @@
#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
+#define SET_TX_DESC_MBSSID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 12, 4, __val)
+#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 16, 3, __val)
+#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 19, 3, __val)
+#define SET_TX_DESC_ANTSEL_C(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 22, 3, __val)
+#define SET_TX_DESC_ANTSEL_D(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 25, 3, __val)
#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index 73350103b736..03259aa150fd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -489,29 +489,86 @@ void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 u1_h2c_set_pwrmode[H2C_8821AE_PWEMODE_LENGTH] = { 0 };
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 rlbm, power_state = 0;
+ u8 rlbm, power_state = 0, byte5 = 0;
+ u8 awake_intvl; /* DTIM = (awake_intvl - 1) */
+ struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
+ bool bt_ctrl_lps = (rtlpriv->cfg->ops->get_btc_status() ?
+ btc_ops->btc_is_bt_ctrl_lps(rtlpriv) : false);
+ bool bt_lps_on = (rtlpriv->cfg->ops->get_btc_status() ?
+ btc_ops->btc_is_bt_lps_on(rtlpriv) : false);
+
+ if (bt_ctrl_lps)
+ mode = (bt_lps_on ? FW_PS_MIN_MODE : FW_PS_ACTIVE_MODE);
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
+ mode, bt_ctrl_lps);
+
+ switch (mode) {
+ case FW_PS_MIN_MODE:
+ rlbm = 0;
+ awake_intvl = 2;
+ break;
+ case FW_PS_MAX_MODE:
+ rlbm = 1;
+ awake_intvl = 2;
+ break;
+ case FW_PS_DTIM_MODE:
+ rlbm = 2;
+ awake_intvl = ppsc->reg_max_lps_awakeintvl;
+ /* hw->conf.ps_dtim_period or mac->vif->bss_conf.dtim_period
+ * is only used in swlps.
+ */
+ break;
+ default:
+ rlbm = 2;
+ awake_intvl = 4;
+ break;
+ }
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+ if (rtlpriv->mac80211.p2p) {
+ awake_intvl = 2;
+ rlbm = 1;
+ }
+
+ if (mode == FW_PS_ACTIVE_MODE) {
+ byte5 = 0x40;
+ power_state = FW_PWR_STATE_ACTIVE;
+ } else {
+ if (bt_ctrl_lps) {
+ byte5 = btc_ops->btc_get_lps_val(rtlpriv);
+ power_state = btc_ops->btc_get_rpwm_val(rtlpriv);
+
+ if ((rlbm == 2) && (byte5 & BIT(4))) {
+ /* Keep the awake interval at 1 to
+ * avoid degrading coex performance
+ */
+ awake_intvl = 2;
+ rlbm = 2;
+ }
+ } else {
+ byte5 = 0x40;
+ power_state = FW_PWR_STATE_RF_OFF;
+ }
+ }
SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
- rlbm = 0;/*YJ,temp,120316. FW now not support RLBM=2.*/
SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
- (rtlpriv->mac80211.p2p) ?
- ppsc->smart_ps : 1);
+ bt_ctrl_lps ? 0 :
+ ((rtlpriv->mac80211.p2p) ?
+ ppsc->smart_ps : 1));
SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
- ppsc->reg_max_lps_awakeintvl);
+ awake_intvl);
SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
- if (mode == FW_PS_ACTIVE_MODE)
- power_state |= FW_PWR_STATE_ACTIVE;
- else
- power_state |= FW_PWR_STATE_RF_OFF;
-
SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+ SET_H2CCMD_PWRMODE_PARM_BYTE5(u1_h2c_set_pwrmode, byte5);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
u1_h2c_set_pwrmode, H2C_8821AE_PWEMODE_LENGTH);
+ if (rtlpriv->cfg->ops->get_btc_status())
+ btc_ops->btc_record_pwr_mode(rtlpriv, u1_h2c_set_pwrmode,
+ H2C_8821AE_PWEMODE_LENGTH);
rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_SETPWRMODE,
H2C_8821AE_PWEMODE_LENGTH,
u1_h2c_set_pwrmode);
@@ -1588,8 +1645,7 @@ out:
&reserved_page_packet_8812[0], totalpacketlen);
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet_8812, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -1725,8 +1781,7 @@ out:
&reserved_page_packet_8821[0], totalpacketlen);
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet_8821, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -1873,6 +1928,9 @@ void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw,
case C2H_8812_DBG:
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_8812_DBG!!\n");
break;
+ case C2H_8812_TX_REPORT:
+ rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len);
+ break;
case C2H_8812_RA_RPT:
rtl8821ae_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len);
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
index 98d871afd92a..32d46d7128f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
@@ -42,7 +42,7 @@
#define USE_OLD_WOWLAN_DEBUG_FW 0
#define H2C_8821AE_RSVDPAGE_LOC_LEN 5
-#define H2C_8821AE_PWEMODE_LENGTH 5
+#define H2C_8821AE_PWEMODE_LENGTH 7
#define H2C_8821AE_JOINBSSRPT_LENGTH 1
#define H2C_8821AE_AP_OFFLOAD_LENGTH 3
#define H2C_8821AE_WOWLAN_LENGTH 3
@@ -218,6 +218,8 @@ enum rtl8821a_h2c_cmd {
SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __value) \
SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
+#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__cmd, __value) \
+ SET_BITS_TO_LE_1BYTE((__cmd) + 5, 0, 8, __value)
#define GET_8821AE_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
LE_BITS_TO_1BYTE(__cmd, 0, 8)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
index ed69dbe178ff..db8bc8a2de61 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
@@ -267,7 +267,6 @@
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
-#define REG_USTIME_TSF 0x0551
#define REG_MBID_NUM 0x0552
#define REG_DUAL_TSF_RST 0x0553
#define REG_BCN_INTERVAL 0x0554
@@ -275,6 +274,7 @@
#define REG_DRVERLYINT 0x0558
#define REG_BCNDMATIM 0x0559
#define REG_ATIMWND 0x055A
+#define REG_USTIME_TSF 0x055C
#define REG_BCN_MAX_ERR 0x055D
#define REG_RXTSF_OFFSET_CCK 0x055E
#define REG_RXTSF_OFFSET_OFDM 0x055F
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index abaf34cb1433..d71d2776ca03 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -172,8 +172,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
- rtlpriv->psc.reg_fwctrl_lps = 3;
- rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+ rtlpriv->psc.reg_fwctrl_lps = 2;
+ rtlpriv->psc.reg_max_lps_awakeintvl = 2;
/* for ASPM, you can close aspm through
* set const_support_pciaspm = 0
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index 03665e82065f..749818929e8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -740,6 +740,9 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
}
+ /* tx report */
+ rtl_get_tx_report(ptcb_desc, pdesc, hw);
+
/* ptcb_desc->use_driver_rate = true; */
SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->hw_rate > DESC_RATEMCS0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
index b6f3c564b8d1..9843a616dcec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
@@ -114,7 +114,7 @@
#define SET_TX_DESC_RAW(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
#define SET_TX_DESC_BT_INT(__pdesc, __val) \
@@ -185,8 +185,21 @@
#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
+#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 16, 3, __val)
+#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 19, 3, __val)
+#define SET_TX_DESC_ANTSEL_C(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 22, 3, __val)
+#define SET_TX_DESC_ANTSEL_D(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 25, 3, __val)
+#define SET_TX_DESC_MBSSID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 12, 4, __val)
+
#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 28, 0, 16, __val)
#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index c0d2601bc55f..fb1ebb01133f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1886,6 +1886,13 @@ struct rtl_efuse {
u8 channel_plan;
};
+struct rtl_tx_report {
+ atomic_t sn;
+ u16 last_sent_sn;
+ unsigned long last_sent_time;
+ u16 last_recv_sn;
+};
+
struct rtl_ps_ctl {
bool pwrdomain_protect;
bool in_powersavemode;
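
struct rtl_tx_report pairs an atomically allocated sequence number with the last SN sent and received, which is what lets rtl_wait_tx_report_acked() (used in the core.c set_key path above) decide whether the firmware has acknowledged the most recent specially reported frame. A hedged sketch of how such a tracker fits together (helper names are hypothetical; the real rtl_get_tx_report()/rtl_tx_report_handler() live in rtlwifi's base code, not in this hunk):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct tx_report {
            atomic_uint sn;           /* stands in for the kernel's atomic_t */
            uint16_t last_sent_sn;
            uint16_t last_recv_sn;
    };

    /* TX path: allocate a fresh SN for a frame that requests a report. */
    static uint16_t tx_report_alloc_sn(struct tx_report *r)
    {
            uint16_t sn = (uint16_t)atomic_fetch_add(&r->sn, 1);

            r->last_sent_sn = sn;
            return sn;
    }

    /* C2H path: record the SN the firmware just reported. */
    static void tx_report_handle(struct tx_report *r, uint16_t sn)
    {
            r->last_recv_sn = sn;
    }

    /* Is everything we sent acked? */
    static bool tx_report_acked(const struct tx_report *r)
    {
            return r->last_sent_sn == r->last_recv_sn;
    }
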
@@ -2075,6 +2082,8 @@ struct rtl_tcb_desc {
u8 use_driver_rate:1;
u8 disable_ratefallback:1;
+ u8 use_spe_rpt:1;
+
u8 ratr_index;
u8 mac_id;
u8 hw_rate;
@@ -2324,6 +2333,7 @@ struct rtl_locks {
spinlock_t entry_list_lock;
spinlock_t usb_lock;
spinlock_t c2hcmd_lock;
+ spinlock_t scan_list_lock; /* lock for the scan list */
/*FW clock change */
spinlock_t fw_ps_lock;
@@ -2472,6 +2482,9 @@ struct rtl_btc_info {
u8 btcoexist;
u8 ant_num;
u8 single_ant_path;
+
+ u8 ap_num;
+ bool in_4way;
};
struct bt_coexist_info {
@@ -2534,6 +2547,7 @@ struct bt_coexist_info {
struct rtl_btc_ops {
void (*btc_init_variables) (struct rtl_priv *rtlpriv);
void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
+ void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
@@ -2550,6 +2564,13 @@ struct rtl_btc_ops {
bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv,
u8 pkt_type);
+ void (*btc_record_pwr_mode)(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
+ u8 (*btc_get_lps_val)(struct rtl_priv *rtlpriv);
+ u8 (*btc_get_rpwm_val)(struct rtl_priv *rtlpriv);
+ bool (*btc_is_bt_ctrl_lps)(struct rtl_priv *rtlpriv);
+ void (*btc_get_ampdu_cfg)(struct rtl_priv *rtlpriv, u8 *reject_agg,
+ u8 *ctrl_agg_size, u8 *agg_size);
+ bool (*btc_is_bt_lps_on)(struct rtl_priv *rtlpriv);
};
struct proxim {
@@ -2568,6 +2589,17 @@ struct rtl_c2hcmd {
u8 *val;
};
+struct rtl_bssid_entry {
+ struct list_head list;
+ u8 bssid[ETH_ALEN];
+ u32 age;
+};
+
+struct rtl_scan_list {
+ int num;
+ struct list_head list; /* sort by age */
+};
+
struct rtl_priv {
struct ieee80211_hw *hw;
struct completion firmware_loading_complete;
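
rtl_scan_list keeps BSSID entries ordered by age under the new scan_list_lock; rtl_collect_scan_list() (hooked into the PCI RX path above) feeds it from received frames, and sw_scan_complete hands scan_list.num to btcoexist as ap_num. A hedged sketch of the insert-or-refresh pattern such a list implies (standalone C with a plain linked list; the driver itself uses list_head and the spinlock):

    #include <stdlib.h>
    #include <string.h>

    #define ETH_ALEN 6

    struct bssid_entry {
            struct bssid_entry *next;
            unsigned char bssid[ETH_ALEN];
            unsigned long age;        /* last-seen timestamp */
    };

    struct scan_list {
            int num;
            struct bssid_entry *head; /* newest first */
    };

    /* Refresh an existing entry or prepend a new one; the caller is
     * assumed to hold the scan-list lock. */
    static void scan_list_update(struct scan_list *sl,
                                 const unsigned char *bssid, unsigned long now)
    {
            struct bssid_entry *e;

            for (e = sl->head; e; e = e->next) {
                    if (!memcmp(e->bssid, bssid, ETH_ALEN)) {
                            e->age = now;     /* seen again: refresh */
                            return;
                    }
            }

            e = calloc(1, sizeof(*e));
            if (!e)
                    return;
            memcpy(e->bssid, bssid, ETH_ALEN);
            e->age = now;
            e->next = sl->head;
            sl->head = e;
            sl->num++;
    }
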
@@ -2588,6 +2620,8 @@ struct rtl_priv {
struct rtl_security sec;
struct rtl_efuse efuse;
struct rtl_led_ctl ledctl;
+ struct rtl_tx_report tx_report;
+ struct rtl_scan_list scan_list;
struct rtl_ps_ctl psc;
struct rate_adaptive ra;
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
index 828a042f903f..4c0a493bd44e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -125,7 +125,9 @@ static int rsi_stats_read(struct seq_file *seq, void *data)
struct rsi_common *common = seq->private;
unsigned char fsm_state[][32] = {
+ "FSM_FW_NOT_LOADED",
"FSM_CARD_NOT_READY",
+ "FSM_COMMON_DEV_PARAMS_SENT",
"FSM_BOOT_PARAMS_SENT",
"FSM_EEPROM_READ_MAC_ADDR",
"FSM_RESET_MAC_SENT",
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 3d24e8ed74df..c2303599c12e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -718,7 +718,8 @@ int rsi_hal_device_init(struct rsi_hw *adapter)
{
struct rsi_common *common = adapter->priv;
- common->coex_mode = 1;
+ common->coex_mode = RSI_DEV_COEX_MODE_WIFI_ALONE;
+ common->oper_mode = RSI_DEV_OPMODE_WIFI_ALONE;
adapter->device_model = RSI_DEV_9113;
switch (adapter->device_model) {
@@ -733,6 +734,7 @@ int rsi_hal_device_init(struct rsi_hw *adapter)
default:
return -EINVAL;
}
+ common->fsm_state = FSM_CARD_NOT_READY;
return 0;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 8810862ae826..f1cde0ca81f9 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -123,9 +123,16 @@ int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len)
queueno = rsi_get_queueno(frame_desc, offset);
length = rsi_get_length(frame_desc, offset);
- extended_desc = rsi_get_extended_desc(frame_desc, offset);
+
+ /* Extended descriptor is valid for WLAN queues only */
+ if (queueno == RSI_WIFI_DATA_Q || queueno == RSI_WIFI_MGMT_Q)
+ extended_desc = rsi_get_extended_desc(frame_desc,
+ offset);
switch (queueno) {
+ case RSI_COEX_Q:
+ rsi_mgmt_pkt_recv(common, (frame_desc + offset));
+ break;
case RSI_WIFI_DATA_Q:
skb = rsi_prepare_skb(common,
(frame_desc + offset),
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 4433cec4367c..d4d365b5d2d6 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -45,10 +45,10 @@ static struct bootup_params boot_params_20 = {
}
},
.switch_clk_g = {
- .switch_clk_info = cpu_to_le16(BIT(3)),
- .bbp_lmac_clk_reg_val = cpu_to_le16(0x121),
- .umac_clock_reg_config = 0x0,
- .qspi_uart_clock_reg_config = 0x0
+ .switch_clk_info = cpu_to_le16(0xb),
+ .bbp_lmac_clk_reg_val = cpu_to_le16(0x111),
+ .umac_clock_reg_config = cpu_to_le16(0x48),
+ .qspi_uart_clock_reg_config = cpu_to_le16(0x1211)
}
},
{
@@ -106,7 +106,10 @@ static struct bootup_params boot_params_20 = {
.wdt_prog_value = 0x0,
.wdt_soc_rst_delay = 0x0,
.dcdc_operation_mode = 0x0,
- .soc_reset_wait_cnt = 0x0
+ .soc_reset_wait_cnt = 0x0,
+ .waiting_time_at_fresh_sleep = 0x0,
+ .max_threshold_to_avoid_sleep = 0x0,
+ .beacon_resedue_alg_en = 0,
};
static struct bootup_params boot_params_40 = {
@@ -139,7 +142,7 @@ static struct bootup_params boot_params_40 = {
.switch_clk_info = cpu_to_le16(0x09),
.bbp_lmac_clk_reg_val = cpu_to_le16(0x1121),
.umac_clock_reg_config = cpu_to_le16(0x48),
- .qspi_uart_clock_reg_config = 0x0
+ .qspi_uart_clock_reg_config = cpu_to_le16(0x1211)
}
},
{
@@ -197,7 +200,10 @@ static struct bootup_params boot_params_40 = {
.wdt_prog_value = 0x0,
.wdt_soc_rst_delay = 0x0,
.dcdc_operation_mode = 0x0,
- .soc_reset_wait_cnt = 0x0
+ .soc_reset_wait_cnt = 0x0,
+ .waiting_time_at_fresh_sleep = 0x0,
+ .max_threshold_to_avoid_sleep = 0x0,
+ .beacon_resedue_alg_en = 0,
};
static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
@@ -218,6 +224,12 @@ static void rsi_set_default_parameters(struct rsi_common *common)
common->fsm_state = FSM_CARD_NOT_READY;
common->iface_down = true;
common->endpoint = EP_2GHZ_20MHZ;
+ common->driver_mode = 1; /* end-to-end mode */
+ common->lp_ps_handshake_mode = 0; /* default: no handshake mode */
+ common->ulp_ps_handshake_mode = 2; /* default: packet handshake mode */
+ common->rf_power_val = 0; /* default: 1.9V */
+ common->wlan_rf_power_mode = 0;
+ common->obm_ant_sel_val = 2;
}
/**
@@ -389,9 +401,7 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
struct ieee80211_tx_info *info;
struct skb_info *rx_params;
u8 pad_bytes = msg[4];
- u8 pkt_recv;
struct sk_buff *skb;
- char *buffer;
if (type == RX_DOT11_MGMT) {
if (!adapter->sc_nvifs)
@@ -412,11 +422,9 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
return -ENOMEM;
}
- buffer = skb_put_data(skb,
- (u8 *)(msg + FRAME_DESC_SZ + pad_bytes),
- msg_len);
-
- pkt_recv = buffer[0];
+ skb_put_data(skb,
+ (u8 *)(msg + FRAME_DESC_SZ + pad_bytes),
+ msg_len);
info = IEEE80211_SKB_CB(skb);
rx_params = (struct skb_info *)info->driver_data;
@@ -754,6 +762,53 @@ int rsi_hal_load_key(struct rsi_common *common,
}
/*
+ * This function sends the common device configuration parameters to the
+ * device. The frame carries the information the device needs to run in a
+ * specific operating mode.
+ */
+static int rsi_send_common_dev_params(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ u16 frame_len;
+ struct rsi_config_vals *dev_cfgs;
+
+ frame_len = sizeof(struct rsi_config_vals);
+
+ rsi_dbg(MGMT_TX_ZONE, "Sending common device config params\n");
+ skb = dev_alloc_skb(frame_len);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, frame_len);
+
+ dev_cfgs = (struct rsi_config_vals *)skb->data;
+ memset(dev_cfgs, 0, (sizeof(struct rsi_config_vals)));
+
+ rsi_set_len_qno(&dev_cfgs->len_qno, (frame_len - FRAME_DESC_SZ),
+ RSI_COEX_Q);
+ dev_cfgs->pkt_type = COMMON_DEV_CONFIG;
+
+ dev_cfgs->lp_ps_handshake = common->lp_ps_handshake_mode;
+ dev_cfgs->ulp_ps_handshake = common->ulp_ps_handshake_mode;
+
+ dev_cfgs->unused_ulp_gpio = RSI_UNUSED_ULP_GPIO_BITMAP;
+ dev_cfgs->unused_soc_gpio_bitmap =
+ cpu_to_le32(RSI_UNUSED_SOC_GPIO_BITMAP);
+
+ dev_cfgs->opermode = common->oper_mode;
+ dev_cfgs->wlan_rf_pwr_mode = common->wlan_rf_power_mode;
+ dev_cfgs->driver_mode = common->driver_mode;
+ dev_cfgs->region_code = NL80211_DFS_FCC;
+ dev_cfgs->antenna_sel_val = common->obm_ant_sel_val;
+
+ skb_put(skb, frame_len);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/*
 * rsi_load_bootup_params() - This function sends bootup params to the firmware.
* @common: Pointer to the driver private structure.
*
@@ -1485,6 +1540,40 @@ out:
return -EINVAL;
}
+static int rsi_handle_card_ready(struct rsi_common *common, u8 *msg)
+{
+ switch (common->fsm_state) {
+ case FSM_CARD_NOT_READY:
+ rsi_dbg(INIT_ZONE, "Card ready indication from Common HAL\n");
+ rsi_set_default_parameters(common);
+ if (rsi_send_common_dev_params(common) < 0)
+ return -EINVAL;
+ common->fsm_state = FSM_COMMON_DEV_PARAMS_SENT;
+ break;
+ case FSM_COMMON_DEV_PARAMS_SENT:
+ rsi_dbg(INIT_ZONE, "Card ready indication from WLAN HAL\n");
+
+ /* Get usb buffer status register address */
+ common->priv->usb_buffer_status_reg = *(u32 *)&msg[8];
+ rsi_dbg(INFO_ZONE, "USB buffer status register = %x\n",
+ common->priv->usb_buffer_status_reg);
+
+ if (rsi_load_bootup_params(common)) {
+ common->fsm_state = FSM_CARD_NOT_READY;
+ return -EINVAL;
+ }
+ common->fsm_state = FSM_BOOT_PARAMS_SENT;
+ break;
+ default:
+ rsi_dbg(ERR_ZONE,
+ "%s: card ready indication in invalid state %d.\n",
+ __func__, common->fsm_state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* rsi_mgmt_pkt_recv() - This function processes the management packets
 * received from the hardware.
@@ -1497,7 +1586,6 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
{
s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff);
u16 msg_type = (msg[2]);
- int ret;
rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n",
__func__, msg_len, msg_type);
@@ -1507,17 +1595,7 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
} else if (msg_type == CARD_READY_IND) {
rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n",
__func__);
- if (common->fsm_state == FSM_CARD_NOT_READY) {
- rsi_set_default_parameters(common);
-
- ret = rsi_load_bootup_params(common);
- if (ret)
- return ret;
- else
- common->fsm_state = FSM_BOOT_PARAMS_SENT;
- } else {
- return -EINVAL;
- }
+ return rsi_handle_card_ready(common, msg);
} else if (msg_type == TX_STATUS_IND) {
if (msg[15] == PROBEREQ_CONFIRM) {
common->mgmt_q_block = false;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 2ef844adacf3..e5ea99bb2dd8 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -139,6 +139,8 @@ static void rsi_handle_interrupt(struct sdio_func *function)
{
struct rsi_hw *adapter = sdio_get_drvdata(function);
+ if (adapter->priv->fsm_state == FSM_FW_NOT_LOADED)
+ return;
sdio_release_host(function);
rsi_interrupt_handler(adapter);
sdio_claim_host(function);
@@ -908,10 +910,19 @@ static int rsi_probe(struct sdio_func *pfunction,
__func__);
goto fail;
}
+ sdio_claim_host(pfunction);
+ if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to request IRQ\n", __func__);
+ sdio_release_host(pfunction);
+ goto fail;
+ }
+ sdio_release_host(pfunction);
+ rsi_dbg(INIT_ZONE, "%s: Registered Interrupt handler\n", __func__);
if (rsi_hal_device_init(adapter)) {
rsi_dbg(ERR_ZONE, "%s: Failed in device init\n", __func__);
sdio_claim_host(pfunction);
+ sdio_release_irq(pfunction);
sdio_disable_func(pfunction);
sdio_release_host(pfunction);
goto fail;
@@ -923,16 +934,6 @@ static int rsi_probe(struct sdio_func *pfunction,
return -EIO;
}
- sdio_claim_host(pfunction);
- if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) {
- rsi_dbg(ERR_ZONE, "%s: Failed to request IRQ\n", __func__);
- sdio_release_host(pfunction);
- goto fail;
- }
-
- sdio_release_host(pfunction);
- rsi_dbg(INIT_ZONE, "%s: Registered Interrupt handler\n", __func__);
-
return 0;
fail:
rsi_91x_deinit(adapter);
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index f5de6934f7ec..bcd7f454ef30 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -666,6 +666,7 @@ static const struct usb_device_id rsi_dev_table[] = {
{ USB_DEVICE(0x041B, 0x0301) },
{ USB_DEVICE(0x041B, 0x0201) },
{ USB_DEVICE(0x041B, 0x9330) },
+ { USB_DEVICE(0x1618, 0x9113) },
{ /* Blank */},
};
diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h
index 5e2721f7909c..238ee96434ec 100644
--- a/drivers/net/wireless/rsi/rsi_boot_params.h
+++ b/drivers/net/wireless/rsi/rsi_boot_params.h
@@ -24,19 +24,19 @@
#define WIFI_AFEPLL_CONFIGS BIT(7)
#define WIFI_SWITCH_CLK_CONFIGS BIT(8)
-#define TA_PLL_M_VAL_20 8
-#define TA_PLL_N_VAL_20 1
+#define TA_PLL_M_VAL_20 9
+#define TA_PLL_N_VAL_20 0
#define TA_PLL_P_VAL_20 4
#define PLL960_M_VAL_20 0x14
#define PLL960_N_VAL_20 0
#define PLL960_P_VAL_20 5
-#define UMAC_CLK_40MHZ 40
+#define UMAC_CLK_40MHZ 80
-#define TA_PLL_M_VAL_40 46
-#define TA_PLL_N_VAL_40 3
-#define TA_PLL_P_VAL_40 3
+#define TA_PLL_M_VAL_40 9
+#define TA_PLL_N_VAL_40 0
+#define TA_PLL_P_VAL_40 4
#define PLL960_M_VAL_40 0x14
#define PLL960_N_VAL_40 0
@@ -122,5 +122,8 @@ struct bootup_params {
/* dcdc modes configs */
__le32 dcdc_operation_mode;
__le32 soc_reset_wait_cnt;
+ __le32 waiting_time_at_fresh_sleep;
+ __le32 max_threshold_to_avoid_sleep;
+ u8 beacon_resedue_alg_en;
} __packed;
#endif
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index b95200df5b0f..902dc540849c 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -63,6 +63,9 @@
#define COMMAN_HAL_WAIT_FOR_CARD_READY 1
+#define RSI_DEV_OPMODE_WIFI_ALONE 1
+#define RSI_DEV_COEX_MODE_WIFI_ALONE 1
+
struct bl_header {
__le32 flags;
__le32 image_no;
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index ea4fc223cea7..f3985250b593 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -31,13 +31,17 @@
#define FSM_ZONE BIT(7) /* For State Machine Msgs */
#define ISR_ZONE BIT(8) /* For Interrupt Msgs */
-#define FSM_CARD_NOT_READY 0
-#define FSM_BOOT_PARAMS_SENT 1
-#define FSM_EEPROM_READ_MAC_ADDR 2
-#define FSM_RESET_MAC_SENT 3
-#define FSM_RADIO_CAPS_SENT 4
-#define FSM_BB_RF_PROG_SENT 5
-#define FSM_MAC_INIT_DONE 6
+enum RSI_FSM_STATES {
+ FSM_FW_NOT_LOADED,
+ FSM_CARD_NOT_READY,
+ FSM_COMMON_DEV_PARAMS_SENT,
+ FSM_BOOT_PARAMS_SENT,
+ FSM_EEPROM_READ_MAC_ADDR,
+ FSM_RESET_MAC_SENT,
+ FSM_RADIO_CAPS_SENT,
+ FSM_BB_RF_PROG_SENT,
+ FSM_MAC_INIT_DONE
+};
extern u32 rsi_zone_enabled;
extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
@@ -60,6 +64,7 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
#define MAX_CONTINUOUS_VI_PKTS 4
/* Queue information */
+#define RSI_COEX_Q 0x0
#define RSI_WIFI_MGMT_Q 0x4
#define RSI_WIFI_DATA_Q 0x5
#define IEEE80211_MGMT_FRAME 0x00
@@ -206,8 +211,14 @@ struct rsi_common {
struct cqm_info cqm_info;
bool hw_data_qs_blocked;
+ u8 driver_mode;
u8 coex_mode;
-
+ u16 oper_mode;
+ u8 lp_ps_handshake_mode;
+ u8 ulp_ps_handshake_mode;
+ u8 rf_power_val;
+ u8 wlan_rf_power_mode;
+ u8 obm_ant_sel_val;
int tx_power;
u8 ant_in_use;
};
@@ -230,6 +241,7 @@ struct rsi_hw {
enum host_intf rsi_host_intf;
u16 block_size;
+ u32 usb_buffer_status_reg;
#ifdef CONFIG_RSI_DEBUGFS
struct rsi_debugfs *dfsentry;
u8 num_debugfs_entries;
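
Converting the FSM defines into enum RSI_FSM_STATES keeps the old ordering while inserting FSM_FW_NOT_LOADED ahead of FSM_CARD_NOT_READY and FSM_COMMON_DEV_PARAMS_SENT between card-ready and boot-params. Together with the sdio.c change that returns early from the interrupt handler before firmware load, the FSM now mirrors the two-stage card-ready handshake in rsi_handle_card_ready(). A compact sketch of that handshake (trailing states elided):

    enum rsi_fsm_states {
            FSM_FW_NOT_LOADED,
            FSM_CARD_NOT_READY,
            FSM_COMMON_DEV_PARAMS_SENT,
            FSM_BOOT_PARAMS_SENT,
            /* remaining states elided */
    };

    /* First card-ready comes from the common HAL, the second from WLAN. */
    static int on_card_ready(enum rsi_fsm_states *state)
    {
            switch (*state) {
            case FSM_CARD_NOT_READY:
                    /* send common device config params ... */
                    *state = FSM_COMMON_DEV_PARAMS_SENT;
                    return 0;
            case FSM_COMMON_DEV_PARAMS_SENT:
                    /* send bootup params ... */
                    *state = FSM_BOOT_PARAMS_SENT;
                    return 0;
            default:
                    return -1;  /* card ready in an unexpected state */
            }
    }
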
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index dfbf7a50269b..dcb6db728cbd 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -205,6 +205,7 @@ enum cmd_frame_type {
CW_MODE_REQ,
PER_CMD_PKT,
ANT_SEL_FRAME = 0x20,
+ COMMON_DEV_CONFIG = 0x28,
RADIO_PARAMS_UPDATE = 0x29
};
@@ -282,6 +283,76 @@ struct rsi_radio_caps {
__le16 preamble_type;
} __packed;
+/* ULP GPIO flags */
+#define RSI_GPIO_MOTION_SENSOR_ULP_WAKEUP BIT(0)
+#define RSI_GPIO_SLEEP_IND_FROM_DEVICE BIT(1)
+#define RSI_GPIO_2_ULP BIT(2)
+#define RSI_GPIO_PUSH_BUTTON_ULP_WAKEUP BIT(3)
+
+/* SOC GPIO flags */
+#define RSI_GPIO_0_PSPI_CSN_0 BIT(0)
+#define RSI_GPIO_1_PSPI_CSN_1 BIT(1)
+#define RSI_GPIO_2_HOST_WAKEUP_INTR BIT(2)
+#define RSI_GPIO_3_PSPI_DATA_0 BIT(3)
+#define RSI_GPIO_4_PSPI_DATA_1 BIT(4)
+#define RSI_GPIO_5_PSPI_DATA_2 BIT(5)
+#define RSI_GPIO_6_PSPI_DATA_3 BIT(6)
+#define RSI_GPIO_7_I2C_SCL BIT(7)
+#define RSI_GPIO_8_I2C_SDA BIT(8)
+#define RSI_GPIO_9_UART1_RX BIT(9)
+#define RSI_GPIO_10_UART1_TX BIT(10)
+#define RSI_GPIO_11_UART1_RTS_I2S_CLK BIT(11)
+#define RSI_GPIO_12_UART1_CTS_I2S_WS BIT(12)
+#define RSI_GPIO_13_DBG_UART_RX_I2S_DIN BIT(13)
+#define RSI_GPIO_14_DBG_UART_RX_I2S_DOUT BIT(14)
+#define RSI_GPIO_15_LP_WAKEUP_BOOT_BYPASS BIT(15)
+#define RSI_GPIO_16_LED_0 BIT(16)
+#define RSI_GPIO_17_BTCOEX_WLAN_ACT_EXT_ANT_SEL BIT(17)
+#define RSI_GPIO_18_BTCOEX_BT_PRIO_EXT_ANT_SEL BIT(18)
+#define RSI_GPIO_19_BTCOEX_BT_ACT_EXT_ON_OFF BIT(19)
+#define RSI_GPIO_20_RF_RESET BIT(20)
+#define RSI_GPIO_21_SLEEP_IND_FROM_DEVICE BIT(21)
+
+#define RSI_UNUSED_SOC_GPIO_BITMAP (RSI_GPIO_9_UART1_RX | \
+ RSI_GPIO_10_UART1_TX | \
+ RSI_GPIO_11_UART1_RTS_I2S_CLK | \
+ RSI_GPIO_12_UART1_CTS_I2S_WS | \
+ RSI_GPIO_13_DBG_UART_RX_I2S_DIN | \
+ RSI_GPIO_14_DBG_UART_RX_I2S_DOUT | \
+ RSI_GPIO_15_LP_WAKEUP_BOOT_BYPASS | \
+ RSI_GPIO_17_BTCOEX_WLAN_ACT_EXT_ANT_SEL | \
+ RSI_GPIO_18_BTCOEX_BT_PRIO_EXT_ANT_SEL | \
+ RSI_GPIO_19_BTCOEX_BT_ACT_EXT_ON_OFF | \
+ RSI_GPIO_21_SLEEP_IND_FROM_DEVICE)
+
+#define RSI_UNUSED_ULP_GPIO_BITMAP (RSI_GPIO_MOTION_SENSOR_ULP_WAKEUP | \
+ RSI_GPIO_SLEEP_IND_FROM_DEVICE | \
+ RSI_GPIO_2_ULP | \
+					RSI_GPIO_PUSH_BUTTON_ULP_WAKEUP)
+struct rsi_config_vals {
+ __le16 len_qno;
+ u8 pkt_type;
+ u8 misc_flags;
+ __le16 reserved1[6];
+ u8 lp_ps_handshake;
+ u8 ulp_ps_handshake;
+ u8 sleep_config_params; /* 0 for no handshake,
+ * 1 for GPIO based handshake,
+				 * 2 for packet based handshake
+ */
+ u8 unused_ulp_gpio;
+ __le32 unused_soc_gpio_bitmap;
+ u8 ext_pa_or_bt_coex_en;
+ u8 opermode;
+ u8 wlan_rf_pwr_mode;
+ u8 bt_rf_pwr_mode;
+ u8 zigbee_rf_pwr_mode;
+ u8 driver_mode;
+ u8 region_code;
+ u8 antenna_sel_val;
+ u8 reserved2[16];
+} __packed;
+
static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
{
return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12;
@@ -307,6 +378,11 @@ static inline u8 rsi_get_channel(u8 *addr)
return *(char *)(addr + 15);
}
+static inline void rsi_set_len_qno(__le16 *addr, u16 len, u8 qno)
+{
+ *addr = cpu_to_le16(len | ((qno & 7) << 12));
+}
+
int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg);
int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode,
u8 vap_status);
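A note on the rsi_set_len_qno() helper added above: it packs the frame length into bits 0-11 and the queue number into bits 12-14 of a little-endian descriptor word. A minimal standalone sketch of the same packing (stdint types stand in for the kernel's __le16, and endian conversion is omitted):

#include <stdint.h>
#include <stdio.h>

/* Mirror of rsi_set_len_qno(): bits 0..11 carry the frame length,
 * bits 12..14 the queue number. The kernel stores the result as
 * little-endian (__le16); endianness handling is omitted here.
 */
static void set_len_qno(uint16_t *addr, uint16_t len, uint8_t qno)
{
	*addr = len | ((qno & 7) << 12);
}

int main(void)
{
	uint16_t desc;

	set_len_qno(&desc, 256, 4 /* RSI_WIFI_MGMT_Q */);
	printf("desc word: 0x%04x\n", desc); /* prints 0x4100 */
	return 0;
}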
diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
index 709f56e5ad87..1037ec62659d 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
@@ -266,7 +266,7 @@ static int cw1200_sdio_pm(struct hwbus_priv *self, bool suspend)
return ret;
}
-static struct hwbus_ops cw1200_sdio_hwbus_ops = {
+static const struct hwbus_ops cw1200_sdio_hwbus_ops = {
.hwbus_memcpy_fromio = cw1200_sdio_memcpy_fromio,
.hwbus_memcpy_toio = cw1200_sdio_memcpy_toio,
.lock = cw1200_sdio_lock,
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index 63f95e9c2992..412fb6e49aed 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -352,7 +352,7 @@ static int cw1200_spi_pm(struct hwbus_priv *self, bool suspend)
return irq_set_irq_wake(self->func->irq, suspend);
}
-static struct hwbus_ops cw1200_spi_hwbus_ops = {
+static const struct hwbus_ops cw1200_spi_hwbus_ops = {
.hwbus_memcpy_fromio = cw1200_spi_memcpy_fromio,
.hwbus_memcpy_toio = cw1200_spi_memcpy_toio,
.lock = cw1200_spi_lock,
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index d1aa3eee0e81..0cf3b4013dd6 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -793,9 +793,13 @@ static int wl18xx_set_clk(struct wl1271 *wl)
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_2,
(wl18xx_clk_table[clk_freq].p >> 16) &
PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK);
+ if (ret < 0)
+ goto out;
} else {
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_SWALLOW_EN,
PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
+ if (ret < 0)
+ goto out;
}
/* choose WCS PLL */
@@ -819,8 +823,6 @@ static int wl18xx_set_clk(struct wl1271 *wl)
/* reset the swallowing logic */
ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
PLLSH_COEX_PLL_SWALLOW_EN_VAL2);
- if (ret < 0)
- goto out;
out:
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 53cd6d4d5b50..0f15696195f8 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -117,7 +117,6 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- u8 *buf;
u8 beacon = 0;
u8 is_data = 0;
u8 reserved = 0, offset_to_data = 0;
@@ -180,7 +179,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
	 * packets, copy the packet at an offset of 2 bytes to guarantee that
	 * the IP header is aligned to 4 bytes.
*/
- buf = skb_put_data(skb, data + sizeof(*desc), pkt_data_len);
+ skb_put_data(skb, data + sizeof(*desc), pkt_data_len);
if (rx_align == WLCORE_RX_BUF_PADDED)
skb_pull(skb, RX_BUF_ALIGN);
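For context on the alignment comment in the hunk above: an Ethernet header is 14 bytes, so starting the frame at a 2-byte offset lands the IP header on a 4-byte boundary. A standalone sketch of the arithmetic (the RX_BUF_ALIGN value of 2 is an assumption based on the padding scheme; only the constant names are taken from the driver):

#include <stdio.h>

#define ETH_HLEN     14  /* Ethernet header length */
#define RX_BUF_ALIGN  2  /* pad assumed from the hunk above */

int main(void)
{
	/* Without padding the IP header starts at offset 14 (14 % 4 == 2);
	 * with the 2-byte pad it starts at 16, a 4-byte boundary.
	 */
	printf("unpadded IP offset: %d (mod 4 = %d)\n", ETH_HLEN, ETH_HLEN % 4);
	printf("padded   IP offset: %d (mod 4 = %d)\n",
	       ETH_HLEN + RX_BUF_ALIGN, (ETH_HLEN + RX_BUF_ALIGN) % 4);
	return 0;
}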
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 287023ef4a78..2fb38717346f 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -237,6 +237,7 @@ static const struct of_device_id wlcore_sdio_of_match_table[] = {
{ .compatible = "ti,wl1273", .data = &wl127x_data },
{ .compatible = "ti,wl1281", .data = &wl128x_data },
{ .compatible = "ti,wl1283", .data = &wl128x_data },
+ { .compatible = "ti,wl1285", .data = &wl128x_data },
{ .compatible = "ti,wl1801", .data = &wl18xx_data },
{ .compatible = "ti,wl1805", .data = &wl18xx_data },
{ .compatible = "ti,wl1807", .data = &wl18xx_data },
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index fa3547e06424..fdabb9242cca 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -366,17 +366,14 @@ static int __wl12xx_spi_raw_write(struct device *child, int addr,
static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
void *buf, size_t len, bool fixed)
{
- int ret;
-
/* The ELP wakeup write may fail the first time due to internal
* hardware latency. It is safer to send the wakeup command twice to
* avoid unexpected failures.
*/
if (addr == HW_ACCESS_ELP_CTRL_REG)
- ret = __wl12xx_spi_raw_write(child, addr, buf, len, fixed);
- ret = __wl12xx_spi_raw_write(child, addr, buf, len, fixed);
+ __wl12xx_spi_raw_write(child, addr, buf, len, fixed);
- return ret;
+ return __wl12xx_spi_raw_write(child, addr, buf, len, fixed);
}
/**
@@ -433,6 +430,7 @@ static const struct of_device_id wlcore_spi_of_match_table[] = {
{ .compatible = "ti,wl1273", .data = &wl127x_data},
{ .compatible = "ti,wl1281", .data = &wl128x_data},
{ .compatible = "ti,wl1283", .data = &wl128x_data},
+ { .compatible = "ti,wl1285", .data = &wl128x_data},
{ .compatible = "ti,wl1801", .data = &wl18xx_data},
{ .compatible = "ti,wl1805", .data = &wl18xx_data},
{ .compatible = "ti,wl1807", .data = &wl18xx_data},
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 530586be05b4..5b1d2e8402d9 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -199,6 +199,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
unsigned long remaining_credit;
struct timer_list credit_timeout;
u64 credit_window_start;
+ bool rate_limited;
/* Statistics */
struct xenvif_stats stats;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 8397f6c92451..e322a862ddfe 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -106,7 +106,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
- xenvif_napi_schedule_or_enable_events(queue);
+ /* If the queue is rate-limited, it shall be
+ * rescheduled in the timer callback.
+ */
+ if (likely(!queue->rate_limited))
+ xenvif_napi_schedule_or_enable_events(queue);
}
return work_done;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 602d408fa25e..5042ff8d449a 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -180,6 +180,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
queue->remaining_credit = min(max_credit, max_burst);
+ queue->rate_limited = false;
}
void xenvif_tx_credit_callback(unsigned long data)
@@ -686,8 +687,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
msecs_to_jiffies(queue->credit_usec / 1000);
/* Timer could already be pending in rare cases. */
- if (timer_pending(&queue->credit_timeout))
+ if (timer_pending(&queue->credit_timeout)) {
+ queue->rate_limited = true;
return true;
+ }
/* Passed the point where we can replenish credit? */
if (time_after_eq64(now, next_credit)) {
@@ -702,6 +705,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
mod_timer(&queue->credit_timeout,
next_credit);
queue->credit_window_start = next_credit;
+ queue->rate_limited = true;
return true;
}
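Taken together, the three xen-netback hunks give the new flag a simple lifecycle: tx_credit_exceeded() sets rate_limited when it arms (or finds pending) the credit timer, tx_add_credit() clears it when credit is replenished, and xenvif_poll() skips rescheduling while the flag is set. A simplified standalone model of that state machine (names mirror the driver, but this is a sketch of the flag's lifecycle, not the real datapath):

#include <stdbool.h>
#include <stdio.h>

struct queue {
	unsigned long remaining_credit;
	bool rate_limited;
};

static bool tx_credit_exceeded(struct queue *q, unsigned size)
{
	if (size > q->remaining_credit) {
		q->rate_limited = true;   /* timer will top us up */
		return true;
	}
	q->remaining_credit -= size;
	return false;
}

static void credit_timer_cb(struct queue *q, unsigned long credit)
{
	q->remaining_credit = credit;
	q->rate_limited = false;          /* poll may reschedule again */
}

int main(void)
{
	struct queue q = { .remaining_credit = 100, .rate_limited = false };

	printf("send 80: exceeded=%d\n", tx_credit_exceeded(&q, 80));
	printf("send 80: exceeded=%d rate_limited=%d\n",
	       tx_credit_exceeded(&q, 80), q.rate_limited);
	credit_timer_cb(&q, 100);
	printf("after timer: rate_limited=%d\n", q.rate_limited);
	return 0;
}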
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index c8a8f5badb5b..c05cb637ba92 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1006,7 +1006,7 @@ static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp)
static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
{
struct sk_buff *skb;
- u8 *felica, *nfcid3, *gb;
+ u8 *felica, *nfcid3;
u8 *gbytes = dev->gb;
size_t gbytes_len = dev->gb_len;
@@ -1048,7 +1048,7 @@ static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
/* General bytes */
skb_put_u8(skb, gbytes_len);
- gb = skb_put_data(skb, gbytes, gbytes_len);
+ skb_put_data(skb, gbytes, gbytes_len);
/* Len Tk */
skb_put_u8(skb, 0);
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index c00238491673..7b3b6fd63d7d 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -2878,7 +2878,7 @@ static const struct intel_ntb_reg skx_reg = {
.link_is_up = xeon_link_is_up,
.db_ioread = skx_db_ioread,
.db_iowrite = skx_db_iowrite,
- .db_size = sizeof(u64),
+ .db_size = sizeof(u32),
.ntb_ctl = SKX_NTBCNTL_OFFSET,
.mw_bar = {2, 4},
};
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 02ca45fdd892..10e5bf460139 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -177,14 +177,12 @@ struct ntb_transport_qp {
u64 rx_err_ver;
u64 rx_memcpy;
u64 rx_async;
- u64 dma_rx_prep_err;
u64 tx_bytes;
u64 tx_pkts;
u64 tx_ring_full;
u64 tx_err_no_buf;
u64 tx_memcpy;
u64 tx_async;
- u64 dma_tx_prep_err;
};
struct ntb_transport_mw {
@@ -254,8 +252,6 @@ enum {
#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10
-#define DMA_RETRIES 20
-#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
@@ -516,12 +512,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"free tx - \t%u\n",
ntb_transport_tx_free_entry(qp));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "DMA tx prep err - \t%llu\n",
- qp->dma_tx_prep_err);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "DMA rx prep err - \t%llu\n",
- qp->dma_rx_prep_err);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"\n");
@@ -623,7 +613,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
if (!mw->virt_addr)
return -ENOMEM;
- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+ if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = qp_count / mw_count;
@@ -768,8 +758,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
qp->tx_err_no_buf = 0;
qp->tx_memcpy = 0;
qp->tx_async = 0;
- qp->dma_tx_prep_err = 0;
- qp->dma_rx_prep_err = 0;
}
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
@@ -1000,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp->event_handler = NULL;
ntb_qp_link_down_reset(qp);
- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+ if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = qp_count / mw_count;
@@ -1128,8 +1116,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
qp_count = ilog2(qp_bitmap);
if (max_num_clients && max_num_clients < qp_count)
qp_count = max_num_clients;
- else if (mw_count < qp_count)
- qp_count = mw_count;
+ else if (nt->mw_count < qp_count)
+ qp_count = nt->mw_count;
qp_bitmap &= BIT_ULL(qp_count) - 1;
@@ -1317,7 +1305,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
void *buf = entry->buf;
- int retries = 0;
len = entry->len;
device = chan->device;
@@ -1346,22 +1333,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
unmap->from_cnt = 1;
- for (retries = 0; retries < DMA_RETRIES; retries++) {
- txd = device->device_prep_dma_memcpy(chan,
- unmap->addr[1],
- unmap->addr[0], len,
- DMA_PREP_INTERRUPT);
- if (txd)
- break;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(DMA_OUT_RESOURCE_TO);
- }
-
- if (!txd) {
- qp->dma_rx_prep_err++;
+ txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+ unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
+ if (!txd)
goto err_get_unmap;
- }
txd->callback_result = ntb_rx_copy_callback;
txd->callback_param = entry;
@@ -1606,7 +1582,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
struct dmaengine_unmap_data *unmap;
dma_addr_t dest;
dma_cookie_t cookie;
- int retries = 0;
device = chan->device;
dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
@@ -1628,21 +1603,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
unmap->to_cnt = 1;
- for (retries = 0; retries < DMA_RETRIES; retries++) {
- txd = device->device_prep_dma_memcpy(chan, dest,
- unmap->addr[0], len,
- DMA_PREP_INTERRUPT);
- if (txd)
- break;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(DMA_OUT_RESOURCE_TO);
- }
-
- if (!txd) {
- qp->dma_tx_prep_err++;
+ txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
+ if (!txd)
goto err_get_unmap;
- }
txd->callback_result = ntb_tx_copy_callback;
txd->callback_param = entry;
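The two identical condition rewrites above (in ntb_transport_setup_qp_mw() and ntb_transport_init_queue()) change how queue pairs are spread across memory windows: with the new test, the first qp_count % mw_count windows each take one extra QP. A standalone check of the formula (the counts are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned qp_count = 7, mw_count = 3, mw_num;

	/* New rule from the hunk: window mw_num gets an extra QP iff
	 * mw_num < qp_count % mw_count. For 7 QPs over 3 windows this
	 * yields 3 + 2 + 2.
	 */
	for (mw_num = 0; mw_num < mw_count; mw_num++) {
		unsigned num_qps_mw = qp_count / mw_count +
				      (mw_num < qp_count % mw_count ? 1 : 0);
		printf("mw %u -> %u qps\n", mw_num, num_qps_mw);
	}
	return 0;
}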
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 434e1d474f33..5cab2831ce99 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -90,11 +90,11 @@ MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
static unsigned int seg_order = 19; /* 512K */
module_param(seg_order, uint, 0644);
-MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing");
+MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");
static unsigned int run_order = 32; /* 4G */
module_param(run_order, uint, 0644);
-MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer");
+MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");
static bool use_dma; /* default to 0 */
module_param(use_dma, bool, 0644);
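The corrected parameter descriptions matter because both values are binary orders: the resulting size is 1 << order bytes, not order squared. A quick standalone check against the comments in the source (512K for order 19, 4G for order 32):

#include <stdio.h>

int main(void)
{
	unsigned seg_order = 19, run_order = 32;

	/* "size order [2^n]": 2^19 = 512 KiB segments, 2^32 = 4 GiB total,
	 * matching the inline comments on the module parameters.
	 */
	printf("seg: %llu bytes\n", 1ULL << seg_order);
	printf("run: %llu bytes\n", 1ULL << run_order);
	return 0;
}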
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 951042a375d6..40c7581caeb0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1805,7 +1805,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (pci_is_enabled(pdev)) {
u32 csts = readl(dev->bar + NVME_REG_CSTS);
- if (dev->ctrl.state == NVME_CTRL_LIVE)
+ if (dev->ctrl.state == NVME_CTRL_LIVE ||
+ dev->ctrl.state == NVME_CTRL_RESETTING)
nvme_start_freeze(&dev->ctrl);
dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
pdev->error_state != pci_channel_io_normal);
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 74cf5fffb1e1..c80e37a69305 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -896,7 +896,7 @@ int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
{
if (pci_dev_is_disconnected(dev)) {
*val = ~0;
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
}
@@ -906,7 +906,7 @@ int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
{
if (pci_dev_is_disconnected(dev)) {
*val = ~0;
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
}
@@ -917,7 +917,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where,
{
if (pci_dev_is_disconnected(dev)) {
*val = ~0;
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
}
@@ -926,7 +926,7 @@ EXPORT_SYMBOL(pci_read_config_dword);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
{
if (pci_dev_is_disconnected(dev))
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_byte);
@@ -934,7 +934,7 @@ EXPORT_SYMBOL(pci_write_config_byte);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
{
if (pci_dev_is_disconnected(dev))
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_word);
@@ -943,7 +943,7 @@ int pci_write_config_dword(const struct pci_dev *dev, int where,
u32 val)
{
if (pci_dev_is_disconnected(dev))
- return -ENODEV;
+ return PCIBIOS_DEVICE_NOT_FOUND;
return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);
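The accessors above now fail with PCIBIOS_DEVICE_NOT_FOUND instead of -ENODEV, so callers that propagate errnos need a translation step; in the kernel that is pcibios_err_to_errno(). A standalone mock of that translation, assuming the usual PCIBIOS constant values (0x86 for DEVICE_NOT_FOUND) and showing only the case relevant to this hunk:

#include <errno.h>
#include <stdio.h>

#define PCIBIOS_SUCCESSFUL	 0x00
#define PCIBIOS_DEVICE_NOT_FOUND 0x86	/* values assumed from kernel headers */

/* Userspace mock of the kernel's pcibios_err_to_errno(): positive
 * PCIBIOS codes are translated to negative errnos; 0 and negative
 * values pass through unchanged.
 */
static int pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err;
	if (err == PCIBIOS_DEVICE_NOT_FOUND)
		return -ENODEV;
	return -ERANGE;	/* catch-all for codes not modeled here */
}

int main(void)
{
	printf("0x86 -> %d\n", pcibios_err_to_errno(PCIBIOS_DEVICE_NOT_FOUND));
	return 0;
}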
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 175edad42d2f..2942066607e0 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -5,6 +5,7 @@
config PCI_EPF_TEST
tristate "PCI Endpoint Test driver"
depends on PCI_ENDPOINT
+ select CRC32
help
Enable this configuration option to enable the test driver
for PCI Endpoint.
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 1482d132fbb8..e432ec887479 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = {
.flags = IRQCHIP_SKIP_SET_WAKE,
};
-static void amd_gpio_irq_handler(struct irq_desc *desc)
+#define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
+
+static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
{
- u32 i;
- u32 off;
- u32 reg;
- u32 pin_reg;
- u64 reg64;
- int handled = 0;
- unsigned int irq;
+ struct amd_gpio *gpio_dev = dev_id;
+ struct gpio_chip *gc = &gpio_dev->gc;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int i, irqnr;
unsigned long flags;
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ u32 *regs, regval;
+ u64 status, mask;
- chained_irq_enter(chip, desc);
- /*enable GPIO interrupt again*/
+ /* Read the wake status */
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
- reg64 = reg;
- reg64 = reg64 << 32;
-
- reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
- reg64 |= reg;
+ status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
+ status <<= 32;
+ status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- /*
- * first 46 bits indicates interrupt status.
- * one bit represents four interrupt sources.
- */
- for (off = 0; off < 46 ; off++) {
- if (reg64 & BIT(off)) {
- for (i = 0; i < 4; i++) {
- pin_reg = readl(gpio_dev->base +
- (off * 4 + i) * 4);
- if ((pin_reg & BIT(INTERRUPT_STS_OFF)) ||
- (pin_reg & BIT(WAKE_STS_OFF))) {
- irq = irq_find_mapping(gc->irqdomain,
- off * 4 + i);
- generic_handle_irq(irq);
- writel(pin_reg,
- gpio_dev->base
- + (off * 4 + i) * 4);
- handled++;
- }
- }
+	/* Bits 0-45 contain the relevant status bits */
+ status &= (1ULL << 46) - 1;
+ regs = gpio_dev->base;
+ for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
+ if (!(status & mask))
+ continue;
+ status &= ~mask;
+
+ /* Each status bit covers four pins */
+ for (i = 0; i < 4; i++) {
+ regval = readl(regs + i);
+ if (!(regval & PIN_IRQ_PENDING))
+ continue;
+ irq = irq_find_mapping(gc->irqdomain, irqnr + i);
+ generic_handle_irq(irq);
+ /* Clear interrupt */
+ writel(regval, regs + i);
+ ret = IRQ_HANDLED;
}
}
- if (handled == 0)
- handle_bad_irq(desc);
-
+ /* Signal EOI to the GPIO unit */
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
- reg |= EOI_MASK;
- writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
+ regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+ regval |= EOI_MASK;
+ writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- chained_irq_exit(chip, desc);
+ return ret;
}
static int amd_get_groups_count(struct pinctrl_dev *pctldev)
@@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev)
goto out2;
}
- gpiochip_set_chained_irqchip(&gpio_dev->gc,
- &amd_gpio_irqchip,
- irq_base,
- amd_gpio_irq_handler);
+ ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
+ KBUILD_MODNAME, gpio_dev);
+ if (ret)
+ goto out2;
+
platform_set_drvdata(pdev, gpio_dev);
dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
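The rewritten handler above depends on a fixed register layout: 46 wake-status bits, each covering four consecutive pins, with one 32-bit pin register every 4 bytes from the base. A standalone sketch of that index arithmetic (the layout is inferred from the loop above; the bit value is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned status_bit = 11, i;

	/* Each WAKE_INT_STATUS bit covers four pins; pin registers are
	 * 4 bytes wide, so pin n lives at byte offset n * 4 from base.
	 */
	for (i = 0; i < 4; i++) {
		unsigned pin = status_bit * 4 + i;
		printf("status bit %u, slot %u -> pin %u, reg offset 0x%x\n",
		       status_bit, i, pin, pin * 4);
	}
	return 0;
}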
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index d3c5f5dfbbd7..222b6685b09f 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_OUTPUT:
__stm32_gpio_set(bank, offset, arg);
- ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+ ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
break;
default:
ret = -EINVAL;
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index ef29f18b1951..4cc2f4ea0a25 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -97,11 +97,9 @@
} \
}
-#ifdef CONFIG_PM_SLEEP
static u8 suspend_prep_ok;
static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp;
static u64 suspend_shlw_res_temp, suspend_deep_res_temp;
-#endif
struct telemetry_susp_stats {
u32 shlw_swake_ctr;
@@ -807,7 +805,6 @@ static const struct file_operations telem_ioss_trc_verb_ops = {
.release = single_release,
};
-#ifdef CONFIG_PM_SLEEP
static int pm_suspend_prep_cb(void)
{
struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
@@ -937,7 +934,6 @@ static int pm_notification(struct notifier_block *this,
static struct notifier_block pm_notifier = {
.notifier_call = pm_notification,
};
-#endif /* CONFIG_PM_SLEEP */
static int __init telemetry_debugfs_init(void)
{
@@ -960,14 +956,13 @@ static int __init telemetry_debugfs_init(void)
if (err < 0)
return -EINVAL;
-
-#ifdef CONFIG_PM_SLEEP
register_pm_notifier(&pm_notifier);
-#endif /* CONFIG_PM_SLEEP */
debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL);
- if (!debugfs_conf->telemetry_dbg_dir)
- return -ENOMEM;
+ if (!debugfs_conf->telemetry_dbg_dir) {
+ err = -ENOMEM;
+ goto out_pm;
+ }
f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO,
debugfs_conf->telemetry_dbg_dir, NULL,
@@ -1014,6 +1009,8 @@ static int __init telemetry_debugfs_init(void)
out:
debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
debugfs_conf->telemetry_dbg_dir = NULL;
+out_pm:
+ unregister_pm_notifier(&pm_notifier);
return err;
}
@@ -1022,6 +1019,7 @@ static void __exit telemetry_debugfs_exit(void)
{
debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
debugfs_conf->telemetry_dbg_dir = NULL;
+ unregister_pm_notifier(&pm_notifier);
}
late_initcall(telemetry_debugfs_init);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 99121352c57b..e8782a8619f7 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -483,7 +483,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
return -EBUSY;
} else {
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
header.length = l;
header.type = be16_to_cpu(skb->protocol);
header.unused = 0;
@@ -500,7 +500,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
		 * Protect skb against being freed by upper
* layers.
*/
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
ch->prof.txlen += skb->len;
header.length = skb->len + LL_HEADER_LENGTH;
header.type = be16_to_cpu(skb->protocol);
@@ -517,14 +517,14 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
if (hi) {
nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!nskb) {
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
ctcm_clear_busy(ch->netdev);
return -ENOMEM;
} else {
skb_put_data(nskb, skb->data, skb->len);
- atomic_inc(&nskb->users);
- atomic_dec(&skb->users);
+ refcount_inc(&nskb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
skb = nskb;
}
@@ -542,7 +542,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
* Remove our header. It gets added
* again on retransmit.
*/
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
ctcm_clear_busy(ch->netdev);
return -ENOMEM;
@@ -553,7 +553,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->ccw[1].count = skb->len;
skb_copy_from_linear_data(skb,
skb_put(ch->trans_skb, skb->len), skb->len);
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
ccw_idx = 0;
} else {
@@ -679,7 +679,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
spin_lock_irqsave(&ch->collect_lock, saveflags);
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
if (!p_header) {
@@ -716,7 +716,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
		 * Protect skb against being freed by upper
* layers.
*/
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
/*
* IDAL support in CTCM is broken, so we have to
@@ -729,8 +729,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
goto nomem_exit;
} else {
skb_put_data(nskb, skb->data, skb->len);
- atomic_inc(&nskb->users);
- atomic_dec(&skb->users);
+ refcount_inc(&nskb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
skb = nskb;
}
@@ -810,7 +810,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->trans_skb->len = 0;
ch->ccw[1].count = skb->len;
skb_put_data(ch->trans_skb, skb->data, skb->len);
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
ccw_idx = 0;
CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
@@ -855,7 +855,7 @@ nomem_exit:
"%s(%s): MEMORY allocation ERROR\n",
CTCM_FUNTAIL, ch->id);
rc = -ENOMEM;
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
done:
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 7db427c0a6a4..1579695f4e64 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -743,7 +743,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
conn->prof.tx_pending--;
if (single_flag) {
if ((skb = skb_dequeue(&conn->commit_queue))) {
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
if (privptr) {
privptr->stats.tx_packets++;
privptr->stats.tx_bytes +=
@@ -766,7 +766,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
txbytes += skb->len;
txpackets++;
stat_maxcq++;
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
}
if (conn->collect_len > conn->prof.maxmulti)
@@ -958,7 +958,7 @@ static void netiucv_purge_skb_queue(struct sk_buff_head *q)
struct sk_buff *skb;
while ((skb = skb_dequeue(q))) {
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
}
}
@@ -1176,7 +1176,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
IUCV_DBF_TEXT(data, 2,
"EBUSY from netiucv_transmit_skb\n");
} else {
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
skb_queue_tail(&conn->collect_queue, skb);
conn->collect_len += l;
rc = 0;
@@ -1245,7 +1245,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
} else {
if (copied)
dev_kfree_skb(skb);
- atomic_inc(&nskb->users);
+ refcount_inc(&nskb->users);
skb_queue_tail(&conn->commit_queue, nskb);
}
}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 0efc54a4d82f..7a0ffc71b25d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -986,6 +986,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
int qeth_set_features(struct net_device *, netdev_features_t);
int qeth_recover_features(struct net_device *);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+int qeth_vm_request_mac(struct qeth_card *card);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 08338f27c82c..aec06e10b969 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -27,6 +27,9 @@
#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/compat.h>
+#include <asm/diag.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
#include "qeth_core.h"
@@ -1239,7 +1242,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
}
}
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
}
@@ -3972,7 +3975,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
int flush_cnt = 0, hdr_len, large_send = 0;
buffer = buf->buffer;
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
skb_queue_tail(&buf->skb_list, skb);
/*check first on TSO ....*/
@@ -4103,7 +4106,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
flush_count);
atomic_set(&queue->state,
QETH_OUT_Q_UNLOCKED);
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
}
}
@@ -4122,19 +4126,21 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
* In that case we will enter this loop
*/
while (atomic_dec_return(&queue->state)) {
- flush_count = 0;
start_index = queue->next_buf_to_fill;
/* check if we can go back to non-packing state */
- flush_count += qeth_switch_to_nonpacking_if_needed(queue);
+ tmp = qeth_switch_to_nonpacking_if_needed(queue);
/*
* check if we need to flush a packing buffer to get a pci
* flag out on the queue
*/
- if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
- flush_count += qeth_prep_flush_pack_buffer(queue);
- if (flush_count)
- qeth_flush_buffers(queue, start_index, flush_count);
+ if (!tmp && !atomic_read(&queue->set_pci_flags_count))
+ tmp = qeth_prep_flush_pack_buffer(queue);
+ if (tmp) {
+ qeth_flush_buffers(queue, start_index, tmp);
+ flush_count += tmp;
+ }
}
+out:
/* at this point the queue is UNLOCKED again */
if (queue->card->options.performance_stats && do_pack)
queue->card->perf_stats.bufs_sent_pack += flush_count;
@@ -4770,6 +4776,64 @@ static int qeth_query_card_info(struct qeth_card *card,
(void *)carrier_info);
}
+/**
+ * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
+ * @card: pointer to a qeth_card
+ *
+ * Returns:
+ *	0 if a MAC address has been set for the card's netdevice,
+ *	a non-zero return code for the various error conditions.
+ */
+int qeth_vm_request_mac(struct qeth_card *card)
+{
+ struct diag26c_mac_resp *response;
+ struct diag26c_mac_req *request;
+ struct ccw_dev_id id;
+ int rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
+
+ if (!card->dev)
+ return -ENODEV;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
+ response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
+ if (!request || !response) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ccw_device_get_id(CARD_DDEV(card), &id);
+ request->resp_buf_len = sizeof(*response);
+ request->resp_version = DIAG26C_VERSION2;
+ request->op_code = DIAG26C_GET_MAC;
+ request->devno = id.devno;
+
+ rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
+ if (rc)
+ goto out;
+
+ if (request->resp_buf_len < sizeof(*response) ||
+ response->version != request->resp_version) {
+ rc = -EIO;
+ QETH_DBF_TEXT(SETUP, 2, "badresp");
+ QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len,
+ sizeof(request->resp_buf_len));
+ } else if (!is_valid_ether_addr(response->mac)) {
+ rc = -EINVAL;
+ QETH_DBF_TEXT(SETUP, 2, "badmac");
+ QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN);
+ } else {
+ ether_addr_copy(card->dev->dev_addr, response->mac);
+ }
+
+out:
+ kfree(response);
+ kfree(request);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
+
static inline int qeth_get_qdio_q_format(struct qeth_card *card)
{
if (card->info.type == QETH_CARD_TYPE_IQD)
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index ab9b1376467f..6dd7d05e5693 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -170,12 +170,18 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_TRACE_ALREADY_ACTIVE, "trace already active"},
{IPA_RC_INVALID_FORMAT, "invalid format or length"},
{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
+ {IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"},
{IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
{IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
{IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
{IPA_RC_ID_NOT_FOUND, "Identifier not found"},
+ {IPA_RC_SBP_IQD_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
+ {IPA_RC_SBP_IQD_CURRENT_SECOND, "Bridgeport is currently secondary"},
+ {IPA_RC_SBP_IQD_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
{IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
+ {IPA_RC_SBP_IQD_CURRENT_PRIMARY, "Bridgeport is currently primary"},
{IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
+ {IPA_RC_SBP_IQD_NO_QDIO_QUEUES, "QDIO queues not established"},
{IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
{IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
{IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
@@ -187,6 +193,14 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
{IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
{IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
+ {IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"},
+ {IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"},
+ {IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
+ {IPA_RC_SBP_OSA_CURRENT_SECOND, "Bridgeport is currently secondary"},
+ {IPA_RC_SBP_OSA_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
+ {IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"},
+ {IPA_RC_SBP_OSA_CURRENT_PRIMARY, "Bridgeport is currently primary"},
+ {IPA_RC_SBP_OSA_NO_QDIO_QUEUES, "QDIO queues not established"},
{IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
{IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
{IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 45bbea2843bf..912e0107de8f 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -142,12 +142,18 @@ enum qeth_ipa_return_codes {
IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005,
IPA_RC_INVALID_FORMAT = 0x0006,
IPA_RC_DUP_IPV6_REMOTE = 0x0008,
+ IPA_RC_SBP_IQD_NOT_CONFIGURED = 0x000C,
IPA_RC_DUP_IPV6_HOME = 0x0010,
IPA_RC_UNREGISTERED_ADDR = 0x0011,
IPA_RC_NO_ID_AVAILABLE = 0x0012,
IPA_RC_ID_NOT_FOUND = 0x0013,
+ IPA_RC_SBP_IQD_ANO_DEV_PRIMARY = 0x0014,
+ IPA_RC_SBP_IQD_CURRENT_SECOND = 0x0018,
+ IPA_RC_SBP_IQD_LIMIT_SECOND = 0x001C,
IPA_RC_INVALID_IP_VERSION = 0x0020,
+ IPA_RC_SBP_IQD_CURRENT_PRIMARY = 0x0024,
IPA_RC_LAN_FRAME_MISMATCH = 0x0040,
+ IPA_RC_SBP_IQD_NO_QDIO_QUEUES = 0x00EB,
IPA_RC_L2_UNSUPPORTED_CMD = 0x2003,
IPA_RC_L2_DUP_MAC = 0x2005,
IPA_RC_L2_ADDR_TABLE_FULL = 0x2006,
@@ -159,6 +165,14 @@ enum qeth_ipa_return_codes {
IPA_RC_L2_INVALID_VLAN_ID = 0x2015,
IPA_RC_L2_DUP_VLAN_ID = 0x2016,
IPA_RC_L2_VLAN_ID_NOT_FOUND = 0x2017,
+ IPA_RC_SBP_OSA_NOT_CONFIGURED = 0x2B0C,
+ IPA_RC_SBP_OSA_OS_MISMATCH = 0x2B10,
+ IPA_RC_SBP_OSA_ANO_DEV_PRIMARY = 0x2B14,
+ IPA_RC_SBP_OSA_CURRENT_SECOND = 0x2B18,
+ IPA_RC_SBP_OSA_LIMIT_SECOND = 0x2B1C,
+ IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN = 0x2B20,
+ IPA_RC_SBP_OSA_CURRENT_PRIMARY = 0x2B24,
+ IPA_RC_SBP_OSA_NO_QDIO_QUEUES = 0x2BEB,
IPA_RC_DATA_MISMATCH = 0xe001,
IPA_RC_INVALID_MTU_SIZE = 0xe002,
IPA_RC_INVALID_LANTYPE = 0xe003,
@@ -187,6 +201,10 @@ enum qeth_ipa_return_codes {
#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
+/* for SETBRIDGEPORT (double occupancies) */
+#define IPA_RC_SBP_IQD_OS_MISMATCH IPA_RC_DUP_IPV6_HOME
+#define IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN IPA_RC_INVALID_IP_VERSION
+
/* IPA function flags; each flag marks availability of respective function */
enum qeth_ipa_funcs {
IPA_ARP_PROCESSING = 0x00000001L,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index c6bc63b8b295..ad110abfdd47 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -21,6 +21,7 @@
#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/string.h>
+#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
@@ -505,9 +506,19 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
int rc = 0;
char vendor_pre[] = {0x02, 0x00, 0x00};
- QETH_DBF_TEXT(SETUP, 2, "doL2init");
+ QETH_DBF_TEXT(SETUP, 2, "l2reqmac");
QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
+ if (MACHINE_IS_VM) {
+ rc = qeth_vm_request_mac(card);
+ if (!rc)
+ goto out;
+ QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n",
+ CARD_BUS_ID(card), rc);
+ QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
+ /* fall back to alternative mechanism: */
+ }
+
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc) {
@@ -528,11 +539,12 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
return rc;
}
- QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
} else {
eth_random_addr(card->dev->dev_addr);
memcpy(card->dev->dev_addr, vendor_pre, 3);
}
+out:
+ QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len);
return 0;
}
@@ -1650,27 +1662,27 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) ||
(!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc)))
switch (cbctl->cmd_rc) {
- case 0x0000:
+ case IPA_RC_SUCCESS:
rc = 0;
break;
- case 0x2B04:
- case 0x0004:
+ case IPA_RC_L2_UNSUPPORTED_CMD:
+ case IPA_RC_UNSUPPORTED_COMMAND:
rc = -EOPNOTSUPP;
break;
- case 0x2B0C:
- case 0x000C: /* Not configured as bridge Port */
+ case IPA_RC_SBP_OSA_NOT_CONFIGURED:
+ case IPA_RC_SBP_IQD_NOT_CONFIGURED:
rc = -ENODEV; /* maybe not the best code here? */
dev_err(&card->gdev->dev,
"The device is not configured as a Bridge Port\n");
break;
- case 0x2B10:
- case 0x0010: /* OS mismatch */
+ case IPA_RC_SBP_OSA_OS_MISMATCH:
+ case IPA_RC_SBP_IQD_OS_MISMATCH:
rc = -EPERM;
dev_err(&card->gdev->dev,
"A Bridge Port is already configured by a different operating system\n");
break;
- case 0x2B14:
- case 0x0014: /* Another device is Primary */
+ case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY:
+ case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY:
switch (setcmd) {
case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
rc = -EEXIST;
@@ -1686,26 +1698,26 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
rc = -EIO;
}
break;
- case 0x2B18:
- case 0x0018: /* This device is currently Secondary */
+ case IPA_RC_SBP_OSA_CURRENT_SECOND:
+ case IPA_RC_SBP_IQD_CURRENT_SECOND:
rc = -EBUSY;
dev_err(&card->gdev->dev,
"The device is already a secondary Bridge Port\n");
break;
- case 0x2B1C:
- case 0x001C: /* Limit for Secondary devices reached */
+ case IPA_RC_SBP_OSA_LIMIT_SECOND:
+ case IPA_RC_SBP_IQD_LIMIT_SECOND:
rc = -EEXIST;
dev_err(&card->gdev->dev,
"The LAN cannot have more secondary Bridge Ports\n");
break;
- case 0x2B24:
- case 0x0024: /* This device is currently Primary */
+ case IPA_RC_SBP_OSA_CURRENT_PRIMARY:
+ case IPA_RC_SBP_IQD_CURRENT_PRIMARY:
rc = -EBUSY;
dev_err(&card->gdev->dev,
"The device is already a primary Bridge Port\n");
break;
- case 0x2B20:
- case 0x0020: /* Not authorized by zManager */
+ case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN:
+ case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN:
rc = -EACCES;
dev_err(&card->gdev->dev,
"The device is not authorized to be a Bridge Port\n");
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 2ee92aa90fe9..e937490d5d97 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
QEDI_ERR(&qedi->dbg_ctx,
"Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
- WARN_ON(1);
}
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index f46880315ba8..5f5a4ef2e529 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1499,11 +1499,9 @@ err_idx:
void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
{
- if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+ if (!test_and_clear_bit(idx, qedi->task_idx_map))
QEDI_ERR(&qedi->dbg_ctx,
"FW task context, already cleared, tid=0x%x\n", idx);
- WARN_ON(1);
- }
}
void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index dc6ecd824365..ff10d1f0a7e4 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -231,16 +231,12 @@ static int ad7152_write_raw_samp_freq(struct device *dev, int val)
if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
- mutex_lock(&chip->state_lock);
ret = i2c_smbus_write_byte_data(chip->client,
AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
- if (ret < 0) {
- mutex_unlock(&chip->state_lock);
+ if (ret < 0)
return ret;
- }
chip->filter_rate_setup = i;
- mutex_unlock(&chip->state_lock);
return ret;
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index ff4119e8de42..31f35025d19e 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -251,8 +251,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
if ((skb_tail_pointer(skb) + add_bytes) <=
skb_end_pointer(skb))
- memset(__skb_put(skb, add_bytes), 0,
- add_bytes);
+ __skb_put_zero(skb, add_bytes);
}
}
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 14173cf6e1e7..afb9dadc1cfe 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -1510,7 +1510,6 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
u8 nr_subframes, i;
unsigned char *pdata;
struct rx_pkt_attrib *pattrib;
- unsigned char *data_ptr;
struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
@@ -1544,8 +1543,7 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
if (sub_skb) {
skb_reserve(sub_skb, 12);
- data_ptr = skb_put_data(sub_skb, pdata,
- nSubframe_Length);
+ skb_put_data(sub_skb, pdata, nSubframe_Length);
} else {
sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
if (sub_skb) {
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index bae98ca0a9b6..03a81ba136b2 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -782,7 +782,6 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
u8 nPadding_Length = 0;
u16 SeqNum = 0;
struct sk_buff *sub_skb;
- u8 *data_ptr;
/* just for debug purpose */
SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
if ((RTLLIB_QOS_HAS_SEQ(fc)) &&
@@ -817,7 +816,7 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
if (!sub_skb)
return 0;
skb_reserve(sub_skb, 12);
- data_ptr = skb_put_data(sub_skb, skb->data, skb->len);
+ skb_put_data(sub_skb, skb->data, skb->len);
sub_skb->dev = ieee->dev;
rxb->subframes[0] = sub_skb;
@@ -869,7 +868,7 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
if (!sub_skb)
return 0;
skb_reserve(sub_skb, 12);
- data_ptr = skb_put_data(sub_skb, skb->data, nSubframe_Length);
+ skb_put_data(sub_skb, skb->data, nSubframe_Length);
sub_skb->dev = ieee->dev;
rxb->subframes[rxb->nr_subframes++] = sub_skb;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 5f2751d4d464..09d2c8649171 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1264,7 +1264,7 @@ rtllib_association_req(struct rtllib_network *beacon,
hdr->info_element[0].id = MFIE_TYPE_SSID;
hdr->info_element[0].len = beacon->ssid_len;
- tag = skb_put_data(skb, beacon->ssid, beacon->ssid_len);
+ skb_put_data(skb, beacon->ssid, beacon->ssid_len);
tag = skb_put(skb, rate_len);
@@ -1340,7 +1340,7 @@ rtllib_association_req(struct rtllib_network *beacon,
}
if (wpa_ie_len) {
- tag = skb_put_data(skb, ieee->wpa_ie, ieee->wpa_ie_len);
+ skb_put_data(skb, ieee->wpa_ie, ieee->wpa_ie_len);
if (PMKCacheIdx >= 0) {
tag = skb_put(skb, 18);
@@ -1356,12 +1356,13 @@ rtllib_association_req(struct rtllib_network *beacon,
}
if (wps_ie_len && ieee->wps_ie) {
- tag = skb_put_data(skb, ieee->wps_ie, wps_ie_len);
+ skb_put_data(skb, ieee->wps_ie, wps_ie_len);
}
- tag = skb_put(skb, turbo_info_len);
- if (turbo_info_len)
+ if (turbo_info_len) {
+ tag = skb_put(skb, turbo_info_len);
rtllib_TURBO_Info(ieee, &tag);
+ }
if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
if (ieee->pHTInfo->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index c0e2f711cb4e..a4aedb489e92 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -780,7 +780,6 @@ static u8 parse_subframe(struct sk_buff *skb,
u16 SeqNum=0;
struct sk_buff *sub_skb;
- u8 *data_ptr;
/* just for debug purpose */
SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
@@ -848,8 +847,7 @@ static u8 parse_subframe(struct sk_buff *skb,
if (!sub_skb)
return 0;
skb_reserve(sub_skb, 12);
- data_ptr = skb_put_data(sub_skb, skb->data,
- nSubframe_Length);
+ skb_put_data(sub_skb, skb->data, nSubframe_Length);
#endif
rxb->subframes[rxb->nr_subframes++] = sub_skb;
if (rxb->nr_subframes >= MAX_SUBFRAME_COUNT) {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 107069180ed2..fe6f38b7ec35 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1112,7 +1112,7 @@ ieee80211_association_req(struct ieee80211_network *beacon,
hdr->info_element[0].id = MFIE_TYPE_SSID;
hdr->info_element[0].len = beacon->ssid_len;
- tag = skb_put_data(skb, beacon->ssid, beacon->ssid_len);
+ skb_put_data(skb, beacon->ssid, beacon->ssid_len);
tag = skb_put(skb, rate_len);
@@ -1184,18 +1184,17 @@ ieee80211_association_req(struct ieee80211_network *beacon,
//choose what wpa_supplicant gives to associate.
- tag = skb_put(skb, wpa_ie_len);
if (wpa_ie_len) {
- memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
+ skb_put_data(skb, ieee->wpa_ie, wpa_ie_len);
}
- tag = skb_put(skb, wmm_info_len);
if (wmm_info_len) {
- ieee80211_WMM_Info(ieee, &tag);
+ tag = skb_put(skb, wmm_info_len);
+ ieee80211_WMM_Info(ieee, &tag);
}
#ifdef THOMAS_TURBO
- tag = skb_put(skb, turbo_info_len);
if (turbo_info_len) {
+ tag = skb_put(skb, turbo_info_len);
ieee80211_TURBO_Info(ieee, &tag);
}
#endif
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index c3cf01c842a3..87ab3ba760fc 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -31,7 +31,6 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
struct r8192_priv *priv = ieee80211_priv(dev);
struct sk_buff *skb;
struct cb_desc *tcb_desc;
- unsigned char *ptr_buf;
/* Get TCB and local buffer from common pool.
* (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
@@ -45,7 +44,7 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
tcb_desc->bLastIniPkt = 0;
skb_reserve(skb, USB_HWDESC_HEADER_LEN);
- ptr_buf = skb_put_data(skb, pData, DataLen);
+ skb_put_data(skb, pData, DataLen);
tcb_desc->txbuf_size = (u16)DataLen;
if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index f96c558b3c6a..ea3eb94b28b3 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -340,7 +340,7 @@ static int amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
int a_len, padding_len;
u16 eth_type, nSubframe_Length;
u8 nr_subframes, i;
- unsigned char *data_ptr, *pdata;
+ unsigned char *pdata;
struct rx_pkt_attrib *pattrib;
_pkt *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
@@ -372,7 +372,7 @@ static int amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
if (!sub_skb)
break;
skb_reserve(sub_skb, 12);
- data_ptr = skb_put_data(sub_skb, pdata, nSubframe_Length);
+ skb_put_data(sub_skb, pdata, nSubframe_Length);
subframes[nr_subframes++] = sub_skb;
if (nr_subframes >= MAX_SUBFRAME_COUNT) {
netdev_warn(padapter->pnetdev, "r8712u: ParseSubframe(): Too many Subframes! Packets dropped!\n");
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 02db59e8b593..aa16d1ab955b 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -160,7 +160,7 @@ static int isFileReadable(char *path)
oldfs = get_fs(); set_fs(get_ds());
if (1!=readFile(fp, &buf, 1))
- ret = PTR_ERR(fp);
+ ret = -EINVAL;
set_fs(oldfs);
filp_close(fp, NULL);
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index 1a6443dc3ff0..f42e00081e0e 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -72,7 +72,6 @@ int rtw_os_recvbuf_resource_free(struct adapter *padapter, struct recv_buf *prec
_pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8 *pdata)
{
u16 eth_type;
- u8 *data_ptr;
_pkt *sub_skb;
struct rx_pkt_attrib *pattrib;
@@ -82,8 +81,7 @@ _pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8
if (sub_skb)
{
skb_reserve(sub_skb, 12);
- data_ptr = skb_put_data(sub_skb, (pdata + ETH_HLEN),
- nSubframe_Length);
+ skb_put_data(sub_skb, (pdata + ETH_HLEN), nSubframe_Length);
}
else
{
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 1de67f209f2c..83ea8ab4f2f4 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -3530,7 +3530,7 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
/* Copy the 802.11 header to the skb
* (ctl frames may be less than a full header)
*/
- datap = skb_put_data(skb, &rxdesc->frame_control, hdrlen);
+ skb_put_data(skb, &rxdesc->frame_control, hdrlen);
/* If any, copy the data from the card to the skb */
if (datalen > 0) {
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 15cd1e33b16b..e583dd8a418b 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1085,8 +1085,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
return;
}
- rpl5 = __skb_put(skb, len);
- memset(rpl5, 0, len);
+ rpl5 = __skb_put_zero(skb, len);
INIT_TP_WR(rpl5, csk->tid);
OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
@@ -1367,8 +1366,7 @@ u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);
skb = __skb_dequeue(&csk->skbq);
- flowc = __skb_put(skb, flowclen);
- memset(flowc, 0, flowclen);
+ flowc = __skb_put_zero(skb, flowclen);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
FW_FLOWC_WR_NPARAMS_V(nparams));
@@ -1439,8 +1437,7 @@ int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
return -ENOMEM;
/* set up ulp submode */
- req = __skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
@@ -1476,8 +1473,7 @@ int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
if (!skb)
return -ENOMEM;
- req = __skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0d8f81591bed..3fdca2cdd8da 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1279,6 +1279,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
*/
if (dump_payload)
goto after_immediate_data;
+ /*
+ * Check for underflow case where both EDTL and immediate data payload
+ * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
+ * already been set in target_cmd_size_check() as se_cmd->data_length.
+ *
+ * For this special case, fail the command and dump the immediate data
+ * payload.
+ */
+ if (cmd->first_burst_len > cmd->se_cmd.data_length) {
+ cmd->sense_reason = TCM_INVALID_CDB_FIELD;
+ goto after_immediate_data;
+ }
immed_ret = iscsit_handle_immediate_data(cmd, hdr,
cmd->first_burst_len);
@@ -4423,8 +4435,11 @@ static void iscsit_logout_post_handler_closesession(
* always sleep waiting for RX/TX thread shutdown to complete
* within iscsit_close_connection().
*/
- if (!conn->conn_transport->rdma_shutdown)
+ if (!conn->conn_transport->rdma_shutdown) {
sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ if (!sleep)
+ return;
+ }
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
@@ -4440,8 +4455,11 @@ static void iscsit_logout_post_handler_samecid(
{
int sleep = 1;
- if (!conn->conn_transport->rdma_shutdown)
+ if (!conn->conn_transport->rdma_shutdown) {
sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ if (!sleep)
+ return;
+ }
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
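Both logout post-handler hunks rely on the same idiom: cmpxchg(&conn->tx_thread_active, true, false) returns the previous value, so a false result means another path already performed the shutdown and the logout completion must not be signalled twice. A standalone C11 sketch of the idiom (note the kernel's cmpxchg() returns the old value, while C11's compare-exchange returns a success flag; with an expected value of true the two agree):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	atomic_bool tx_thread_active = true;
	bool expected = true;

	/* First caller flips true -> false and proceeds with shutdown. */
	bool sleep = atomic_compare_exchange_strong(&tx_thread_active,
						    &expected, false);
	printf("first:  sleep=%d\n", sleep);	/* 1: proceed */

	/* Second caller sees false and must bail out early. */
	expected = true;
	sleep = atomic_compare_exchange_strong(&tx_thread_active,
					       &expected, false);
	printf("second: sleep=%d\n", sleep);	/* 0: return early */
	return 0;
}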
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 9ab7090f7c83..0912de7c0cf8 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -136,7 +136,7 @@ int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-void transport_cmd_finish_abort(struct se_cmd *, int);
+int transport_cmd_finish_abort(struct se_cmd *, int);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index dce1e1b47316..13f47bf4d16b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
kfree(tmr);
}
-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
{
unsigned long flags;
bool remove = true, send_tas;
@@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
transport_send_task_abort(cmd);
}
- transport_cmd_finish_abort(cmd, remove);
+ return transport_cmd_finish_abort(cmd, remove);
}
static int target_check_cdb_and_preempt(struct list_head *list,
@@ -184,8 +184,8 @@ void core_tmr_abort_task(
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- transport_cmd_finish_abort(se_cmd, true);
- target_put_sess_cmd(se_cmd);
+ if (!transport_cmd_finish_abort(se_cmd, true))
+ target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
@@ -281,8 +281,8 @@ static void core_tmr_drain_tmr_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- transport_cmd_finish_abort(cmd, 1);
- target_put_sess_cmd(cmd);
+ if (!transport_cmd_finish_abort(cmd, 1))
+ target_put_sess_cmd(cmd);
}
}
@@ -380,8 +380,8 @@ static void core_tmr_drain_state_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- core_tmr_handle_tas_abort(cmd, tas);
- target_put_sess_cmd(cmd);
+ if (!core_tmr_handle_tas_abort(cmd, tas))
+ target_put_sess_cmd(cmd);
}
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6025935036c9..f1b3a46bdcaf 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -651,9 +651,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
percpu_ref_put(&lun->lun_ref);
}
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+ int ret = 0;
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
@@ -665,9 +666,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
cmd->se_tfo->aborted_task(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
- return;
+ return 1;
if (remove && ack_kref)
- transport_put_cmd(cmd);
+ ret = transport_put_cmd(cmd);
+
+ return ret;
}
static void target_complete_failure_work(struct work_struct *work)
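
The new int return encodes whether the command's reference was already released: a nonzero result means the caller must not drop it again. The intended call-site shape, as used in target_core_tmr.c above (sketch):

if (!transport_cmd_finish_abort(cmd, true))
	target_put_sess_cmd(cmd);	/* only if finish_abort kept the ref */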
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 49d685ad0da9..45b554032332 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -315,6 +315,9 @@ void usb_remove_function(struct usb_configuration *c, struct usb_function *f)
list_del(&f->list);
if (f->unbind)
f->unbind(c, f);
+
+ if (f->bind_deactivated)
+ usb_function_activate(f);
}
EXPORT_SYMBOL_GPL(usb_remove_function);
@@ -956,12 +959,8 @@ static void remove_config(struct usb_composite_dev *cdev,
f = list_first_entry(&config->functions,
struct usb_function, list);
- list_del(&f->list);
- if (f->unbind) {
- DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
- f->unbind(config, f);
- /* may free memory for "f" */
- }
+
+ usb_remove_function(config, f);
}
list_del(&config->list);
if (config->unbind) {
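
A function bound with bind_deactivated starts life with its deactivation count raised, so tearing it down has to rebalance with usb_function_activate(); routing remove_config() through usb_remove_function() gives every removal path that rebalancing. The pairing, schematically (a sketch, not the literal composite.c flow):

/* bind time: deactivation count raised because f->bind_deactivated */
usb_function_deactivate(f);
/* removal time: undo it, or the gadget stays soft-disconnected */
if (f->bind_deactivated)
	usb_function_activate(f);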
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index a9c28c72c1c7..24e34cfcb4bd 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1004,16 +1004,15 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm)
}
/* Insert NDP alignment. */
- ntb_iter = skb_put_zero(skb2, ndp_pad);
+ skb_put_zero(skb2, ndp_pad);
/* Copy NTB across. */
- ntb_iter = skb_put_data(skb2, ncm->skb_tx_ndp->data,
- ncm->skb_tx_ndp->len);
+ skb_put_data(skb2, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len);
dev_consume_skb_any(ncm->skb_tx_ndp);
ncm->skb_tx_ndp = NULL;
/* Insert zero'd datagram. */
- ntb_iter = skb_put_zero(skb2, dgram_idx_len);
+ skb_put_zero(skb2, dgram_idx_len);
return skb2;
}
@@ -1127,8 +1126,8 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
ncm->ndp_dgram_count++;
/* Add the new data to the skb */
- ntb_data = skb_put_zero(ncm->skb_tx_data, dgram_pad);
- ntb_data = skb_put_data(ncm->skb_tx_data, skb->data, skb->len);
+ skb_put_zero(ncm->skb_tx_data, dgram_pad);
+ skb_put_data(ncm->skb_tx_data, skb->data, skb->len);
dev_consume_skb_any(skb);
skb = NULL;
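
The removed ntb_iter/ntb_data assignments were dead stores; both helpers return a pointer to the freshly added region, which a caller may simply ignore. Their shapes, roughly:

void *skb_put_zero(struct sk_buff *skb, unsigned int len);		/* append len zeroed bytes */
void *skb_put_data(struct sk_buff *skb, const void *data, unsigned int len);	/* append a copy of data */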
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index b9ca0a26cbd9..684900fcfe24 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1183,8 +1183,10 @@ dev_release (struct inode *inode, struct file *fd)
/* closing ep0 === shutdown all */
- if (dev->gadget_registered)
+ if (dev->gadget_registered) {
usb_gadget_unregister_driver (&gadgetfs_driver);
+ dev->gadget_registered = false;
+ }
/* at this point "good" hardware has disconnected the
* device from USB; the host won't see it any more.
@@ -1677,9 +1679,10 @@ static void
gadgetfs_suspend (struct usb_gadget *gadget)
{
struct dev_data *dev = get_gadget_data (gadget);
+ unsigned long flags;
INFO (dev, "suspended from state %d\n", dev->state);
- spin_lock (&dev->lock);
+ spin_lock_irqsave(&dev->lock, flags);
switch (dev->state) {
case STATE_DEV_SETUP: // VERY odd... host died??
case STATE_DEV_CONNECTED:
@@ -1690,7 +1693,7 @@ gadgetfs_suspend (struct usb_gadget *gadget)
default:
break;
}
- spin_unlock (&dev->lock);
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static struct usb_gadget_driver gadgetfs_driver = {
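
Since gadget callbacks like this can be reached from interrupt context, a plain spin_lock()/spin_unlock() pair risks deadlocking against an interrupted lock holder; the _irqsave variants also mask local interrupts and restore the prior state. General shape (sketch):

unsigned long flags;

spin_lock_irqsave(&dev->lock, flags);		/* lock + disable local IRQs */
/* ... critical section, safe from IRQ re-entry ... */
spin_unlock_irqrestore(&dev->lock, flags);	/* restore saved IRQ state */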
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ccabb51cb98d..7635fd7cc328 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
/* Report reset and disconnect events to the driver */
if (dum->driver && (disconnect || reset)) {
stop_activity(dum);
- spin_unlock(&dum->lock);
if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver);
else
dum->driver->disconnect(&dum->gadget);
- spin_lock(&dum->lock);
}
} else if (dum_hcd->active != dum_hcd->old_active) {
- if (dum_hcd->old_active && dum->driver->suspend) {
- spin_unlock(&dum->lock);
+ if (dum_hcd->old_active && dum->driver->suspend)
dum->driver->suspend(&dum->gadget);
- spin_lock(&dum->lock);
- } else if (!dum_hcd->old_active && dum->driver->resume) {
- spin_unlock(&dum->lock);
+ else if (!dum_hcd->old_active && dum->driver->resume)
dum->driver->resume(&dum->gadget);
- spin_lock(&dum->lock);
- }
}
dum_hcd->old_status = dum_hcd->port_status;
@@ -983,7 +976,9 @@ static int dummy_udc_stop(struct usb_gadget *g)
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
+ spin_lock_irq(&dum->lock);
dum->driver = NULL;
+ spin_unlock_irq(&dum->lock);
return 0;
}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 6cf07857eaca..f2cbd7f8005e 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2470,11 +2470,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
nuke(&dev->ep[i]);
/* report disconnect; the driver is already quiesced */
- if (driver) {
- spin_unlock(&dev->lock);
+ if (driver)
driver->disconnect(&dev->gadget);
- spin_lock(&dev->lock);
- }
usb_reinit(dev);
}
@@ -3348,8 +3345,6 @@ next_endpoints:
BIT(PCI_RETRY_ABORT_INTERRUPT))
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
-__releases(dev->lock)
-__acquires(dev->lock)
{
struct net2280_ep *ep;
u32 tmp, num, mask, scratch;
@@ -3390,14 +3385,12 @@ __acquires(dev->lock)
if (disconnect || reset) {
stop_activity(dev, dev->driver);
ep0_start(dev);
- spin_unlock(&dev->lock);
if (reset)
usb_gadget_udc_reset
(&dev->gadget, dev->driver);
else
(dev->driver->disconnect)
(&dev->gadget);
- spin_lock(&dev->lock);
return;
}
}
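
With the unlock/relock pairs gone, handle_stat1_irqs() now holds dev->lock across the driver callbacks, so the __releases()/__acquires() sparse annotations were dropped too. Together those annotations mark a function that is entered with a lock held, releases it inside, and re-acquires it before returning; their general use looks like (sketch):

static void calls_out_unlocked(struct net2280 *dev)
	__releases(dev->lock)	/* tells sparse: entered locked ... */
	__acquires(dev->lock)	/* ... and returns locked again */
{
	spin_unlock(&dev->lock);
	/* ... invoke external callbacks without the lock ... */
	spin_lock(&dev->lock);
}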
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1f1687e888d6..fddf2731f798 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2119,11 +2119,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
{
u32 temp, port_offset, port_count;
int i;
- u8 major_revision;
+ u8 major_revision, minor_revision;
struct xhci_hub *rhub;
temp = readl(addr);
major_revision = XHCI_EXT_PORT_MAJOR(temp);
+ minor_revision = XHCI_EXT_PORT_MINOR(temp);
if (major_revision == 0x03) {
rhub = &xhci->usb3_rhub;
@@ -2137,7 +2138,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
return;
}
rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
- rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);
+
+ if (rhub->min_rev < minor_revision)
+ rhub->min_rev = minor_revision;
/* Port offset and count in the third dword, see section 7.2 */
temp = readl(addr + 2);
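
xhci_add_in_port() runs once per extended-capability entry, and several entries can share a major revision; an unconditional assignment lets a later entry downgrade min_rev, while the guarded form keeps a running maximum. Equivalent formulation (sketch):

rhub->min_rev = max(rhub->min_rev, minor_revision);	/* never downgrade */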
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index fcf1f3f63e7a..1bcf971141c0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -201,6 +201,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1042)
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ pdev->device == 0x1142)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 687ebb053438..41d7979d81c5 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1048,7 +1048,7 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
i++, block += DETAILED_TIMING_DESCRIPTION_SIZE)
- if (PIXEL_CLOCK)
+ if (PIXEL_CLOCK != 0)
edt[num++] = block - edid;
/* Yikes, EDID data is totally useless */
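
PIXEL_CLOCK is a macro whose expansion ends in a multiplication, and gcc 7 warns about arithmetic used directly in boolean context (-Wint-in-bool-context); comparing against zero expresses the same test without the warning. Roughly (the macro body shown here is an assumption about fbmon's definition, not quoted from it):

#define PIXEL_CLOCK	(block[0] + (block[1] << 8)) * 10000
...
if (PIXEL_CLOCK != 0)	/* same truth value, no int-in-bool-context warning */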
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index ec2e7e353685..449fceaf79d5 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1646,8 +1646,9 @@ static int ufx_usb_probe(struct usb_interface *interface,
dev_dbg(dev->gdev, "%s %s - serial #%s\n",
usbdev->manufacturer, usbdev->product, usbdev->serial);
dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n",
- usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
- usbdev->descriptor.bcdDevice, dev);
+ le16_to_cpu(usbdev->descriptor.idVendor),
+ le16_to_cpu(usbdev->descriptor.idProduct),
+ le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
dev_dbg(dev->gdev, "console enable=%d\n", console);
dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio);
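
Multi-byte USB descriptor fields are little-endian on the wire (__le16 in struct usb_device_descriptor), so printing them raw yields byte-swapped values on big-endian hosts; le16_to_cpu() makes the conversion explicit. E.g. (sketch):

u16 vid = le16_to_cpu(usbdev->descriptor.idVendor);	/* correct on any host endianness */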
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 6a3c353de7c3..05ef657235df 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1105,8 +1105,8 @@ static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
char *bufptr;
struct urb *urb;
- pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n",
- info->node, dev->blank_mode, blank_mode);
+ pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n",
+ info->node, dev->blank_mode, blank_mode);
if ((dev->blank_mode == FB_BLANK_POWERDOWN) &&
(blank_mode != FB_BLANK_POWERDOWN)) {
@@ -1613,8 +1613,9 @@ static int dlfb_usb_probe(struct usb_interface *interface,
pr_info("%s %s - serial #%s\n",
usbdev->manufacturer, usbdev->product, usbdev->serial);
pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n",
- usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
- usbdev->descriptor.bcdDevice, dev);
+ le16_to_cpu(usbdev->descriptor.idVendor),
+ le16_to_cpu(usbdev->descriptor.idProduct),
+ le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
pr_info("console enable=%d\n", console);
pr_info("fb_defio enable=%d\n", fb_defio);
pr_info("shadow enable=%d\n", shadow);
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index f9718f012aae..badee04ef496 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
}
static void viafb_remove_proc(struct viafb_shared *shared)
{
- struct proc_dir_entry *viafb_entry = shared->proc_entry,
- *iga1_entry = shared->iga1_proc_entry,
- *iga2_entry = shared->iga2_proc_entry;
+ struct proc_dir_entry *viafb_entry = shared->proc_entry;
if (!viafb_entry)
return;
- remove_proc_entry("output_devices", iga2_entry);
+ remove_proc_entry("output_devices", shared->iga2_proc_entry);
remove_proc_entry("iga2", viafb_entry);
- remove_proc_entry("output_devices", iga1_entry);
+ remove_proc_entry("output_devices", shared->iga1_proc_entry);
remove_proc_entry("iga1", viafb_entry);
remove_proc_entry("supported_output_devices", viafb_entry);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 408c174ef0d5..22caf808bfab 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -663,6 +663,12 @@ static int virtballoon_restore(struct virtio_device *vdev)
}
#endif
+static int virtballoon_validate(struct virtio_device *vdev)
+{
+ __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
+ return 0;
+}
+
static unsigned int features[] = {
VIRTIO_BALLOON_F_MUST_TELL_HOST,
VIRTIO_BALLOON_F_STATS_VQ,
@@ -675,6 +681,7 @@ static struct virtio_driver virtio_balloon_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
+ .validate = virtballoon_validate,
.probe = virtballoon_probe,
.remove = virtballoon_remove,
.config_changed = virtballoon_changed,
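
A virtio driver's .validate hook runs after the device's feature bits are read but before the driver accepts them, which is the one point where a feature may be vetoed; the balloon clears VIRTIO_F_IOMMU_PLATFORM because it hands the host guest-physical addresses that must not go through IOMMU translation. Shape of such a hook (a sketch of the pattern above; my_validate is a hypothetical name):

static int my_validate(struct virtio_device *vdev)
{
	/* Veto a device-offered feature before negotiation completes. */
	__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
	return 0;	/* nonzero would fail the probe */
}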