Diffstat (limited to 'include')
-rw-r--r--include/acpi/acpi_bus.h1
-rw-r--r--include/acpi/actbl1.h2
-rw-r--r--include/asm-generic/dma-mapping.h2
-rw-r--r--include/asm-generic/error-injection.h7
-rw-r--r--include/asm-generic/gpio.h12
-rw-r--r--include/asm-generic/hugetlb.h16
-rw-r--r--include/asm-generic/memory_model.h12
-rw-r--r--include/asm-generic/page.h2
-rw-r--r--include/crypto/aead.h20
-rw-r--r--include/crypto/algapi.h6
-rw-r--r--include/crypto/if_alg.h4
-rw-r--r--include/crypto/internal/acompress.h2
-rw-r--r--include/crypto/internal/aead.h2
-rw-r--r--include/crypto/internal/akcipher.h2
-rw-r--r--include/crypto/internal/hash.h2
-rw-r--r--include/crypto/internal/kpp.h2
-rw-r--r--include/crypto/internal/skcipher.h2
-rw-r--r--include/crypto/scatterwalk.h4
-rw-r--r--include/crypto/xts.h25
-rw-r--r--include/drm/display/drm_dp.h15
-rw-r--r--include/drm/display/drm_dp_mst_helper.h6
-rw-r--r--include/drm/drm_accel.h3
-rw-r--r--include/drm/drm_atomic.h32
-rw-r--r--include/drm/drm_atomic_state_helper.h4
-rw-r--r--include/drm/drm_audio_component.h3
-rw-r--r--include/drm/drm_bridge.h36
-rw-r--r--include/drm/drm_bridge_connector.h2
-rw-r--r--include/drm/drm_client.h8
-rw-r--r--include/drm/drm_connector.h100
-rw-r--r--include/drm/drm_crtc_helper.h16
-rw-r--r--include/drm/drm_debugfs.h59
-rw-r--r--include/drm/drm_device.h32
-rw-r--r--include/drm/drm_drv.h7
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_fb_helper.h21
-rw-r--r--include/drm/drm_fixed.h1
-rw-r--r--include/drm/drm_format_helper.h16
-rw-r--r--include/drm/drm_gem.h1
-rw-r--r--include/drm/drm_gem_atomic_helper.h2
-rw-r--r--include/drm/drm_gem_ttm_helper.h3
-rw-r--r--include/drm/drm_gem_vram_helper.h4
-rw-r--r--include/drm/drm_kunit_helpers.h91
-rw-r--r--include/drm/drm_mipi_dbi.h43
-rw-r--r--include/drm/drm_mipi_dsi.h48
-rw-r--r--include/drm/drm_mode_config.h12
-rw-r--r--include/drm/drm_modes.h17
-rw-r--r--include/drm/drm_modeset_helper_vtables.h28
-rw-r--r--include/drm/drm_panel.h10
-rw-r--r--include/drm/drm_pciids.h112
-rw-r--r--include/drm/drm_plane.h4
-rw-r--r--include/drm/drm_print.h5
-rw-r--r--include/drm/drm_probe_helper.h1
-rw-r--r--include/drm/drm_simple_kms_helper.h4
-rw-r--r--include/drm/gpu_scheduler.h8
-rw-r--r--include/drm/ttm/ttm_bo.h (renamed from include/drm/ttm/ttm_bo_api.h)370
-rw-r--r--include/drm/ttm/ttm_bo_driver.h303
-rw-r--r--include/drm/ttm/ttm_device.h7
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h4
-rw-r--r--include/dt-bindings/arm/qcom,ids.h83
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h4
-rw-r--r--include/dt-bindings/clock/imx6sll-clock.h2
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h7
-rw-r--r--include/dt-bindings/clock/imx8ulp-clock.h4
-rw-r--r--include/dt-bindings/clock/loongson,ls2k-clk.h29
-rw-r--r--include/dt-bindings/clock/mediatek,mt7981-clk.h215
-rw-r--r--include/dt-bindings/clock/qcom,gcc-apq8084.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-qcs404.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8350.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8450.h1
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h2
-rw-r--r--include/dt-bindings/clock/qcom,sa8775p-gcc.h320
-rw-r--r--include/dt-bindings/clock/stih416-clks.h17
-rw-r--r--include/dt-bindings/clock/sun20i-d1-ccu.h2
-rw-r--r--include/dt-bindings/firmware/qcom,scm.h16
-rw-r--r--include/dt-bindings/gpio/gpio.h2
-rw-r--r--include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h231
-rw-r--r--include/dt-bindings/interconnect/qcom,sc7180.h3
-rw-r--r--include/dt-bindings/interconnect/qcom,sc8180x.h3
-rw-r--r--include/dt-bindings/interconnect/qcom,sc8280xp.h4
-rw-r--r--include/dt-bindings/interconnect/qcom,sdm670-rpmh.h136
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx55.h2
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8150.h3
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8250.h3
-rw-r--r--include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h137
-rw-r--r--include/dt-bindings/power/allwinner,sun20i-d1-ppu.h10
-rw-r--r--include/dt-bindings/power/mediatek,mt8188-power.h44
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h30
-rw-r--r--include/dt-bindings/power/r8a779g0-sysc.h1
-rw-r--r--include/dt-bindings/power/starfive,jh7110-pmu.h17
-rw-r--r--include/dt-bindings/reset/mt8195-resets.h45
-rw-r--r--include/dt-bindings/reset/sun20i-d1-ccu.h2
-rw-r--r--include/kunit/static_stub.h113
-rw-r--r--include/kunit/test-bug.h29
-rw-r--r--include/kvm/arm_arch_timer.h6
-rw-r--r--include/kvm/arm_vgic.h4
-rw-r--r--include/linux/acpi.h19
-rw-r--r--include/linux/avf/virtchnl.h159
-rw-r--r--include/linux/bitfield.h26
-rw-r--r--include/linux/blkdev.h12
-rw-r--r--include/linux/bootconfig.h2
-rw-r--r--include/linux/bpf.h156
-rw-r--r--include/linux/bpf_verifier.h83
-rw-r--r--include/linux/btf.h23
-rw-r--r--include/linux/buffer_head.h5
-rw-r--r--include/linux/cacheinfo.h13
-rw-r--r--include/linux/can/bittiming.h12
-rw-r--r--include/linux/capability.h125
-rw-r--r--include/linux/console.h105
-rw-r--r--include/linux/console_struct.h3
-rw-r--r--include/linux/container_of.h2
-rw-r--r--include/linux/coresight-pmu.h34
-rw-r--r--include/linux/coresight.h4
-rw-r--r--include/linux/cpuhotplug.h6
-rw-r--r--include/linux/cpumask.h20
-rw-r--r--include/linux/crypto.h4
-rw-r--r--include/linux/cxl_err.h22
-rw-r--r--include/linux/damon.h68
-rw-r--r--include/linux/dax.h7
-rw-r--r--include/linux/devfreq.h7
-rw-r--r--include/linux/device-mapper.h60
-rw-r--r--include/linux/device.h7
-rw-r--r--include/linux/device/bus.h97
-rw-r--r--include/linux/device/driver.h1
-rw-r--r--include/linux/dfl.h8
-rw-r--r--include/linux/dm-bufio.h13
-rw-r--r--include/linux/dm-dirty-log.h9
-rw-r--r--include/linux/dm-io.h9
-rw-r--r--include/linux/dm-kcopyd.h23
-rw-r--r--include/linux/dm-region-hash.h9
-rw-r--r--include/linux/dma-buf.h4
-rw-r--r--include/linux/dma-map-ops.h2
-rw-r--r--include/linux/dma/amd_xdma.h16
-rw-r--r--include/linux/dma/edma.h25
-rw-r--r--include/linux/dma/imx-dma.h1
-rw-r--r--include/linux/dmaengine.h15
-rw-r--r--include/linux/dmar.h1
-rw-r--r--include/linux/dsa/ksz_common.h53
-rw-r--r--include/linux/efi.h36
-rw-r--r--include/linux/error-injection.h3
-rw-r--r--include/linux/etherdevice.h14
-rw-r--r--include/linux/ethtool.h265
-rw-r--r--include/linux/ethtool_netlink.h42
-rw-r--r--include/linux/exportfs.h2
-rw-r--r--include/linux/f2fs_fs.h2
-rw-r--r--include/linux/fb.h23
-rw-r--r--include/linux/filter.h1
-rw-r--r--include/linux/find.h33
-rw-r--r--include/linux/firewire.h20
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h (renamed from include/linux/qcom_scm.h)6
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h4
-rw-r--r--include/linux/fs.h8
-rw-r--r--include/linux/fsl/enetc_mdio.h21
-rw-r--r--include/linux/fsl/ptp_qoriq.h1
-rw-r--r--include/linux/fwnode.h12
-rw-r--r--include/linux/gfp_types.h12
-rw-r--r--include/linux/gpio.h20
-rw-r--r--include/linux/gpio/consumer.h48
-rw-r--r--include/linux/gpio/driver.h21
-rw-r--r--include/linux/hid-sensor-ids.h1
-rw-r--r--include/linux/hid.h34
-rw-r--r--include/linux/hid_bpf.h170
-rw-r--r--include/linux/highmem.h72
-rw-r--r--include/linux/hisi_acc_qm.h20
-rw-r--r--include/linux/host1x.h10
-rw-r--r--include/linux/huge_mm.h13
-rw-r--r--include/linux/hugetlb.h109
-rw-r--r--include/linux/hugetlb_cgroup.h8
-rw-r--r--include/linux/hwmon.h4
-rw-r--r--include/linux/hyperv.h5
-rw-r--r--include/linux/i2c.h24
-rw-r--r--include/linux/i3c/device.h22
-rw-r--r--include/linux/ieee80211.h1
-rw-r--r--include/linux/ieee802154.h7
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/iio/iio.h5
-rw-r--r--include/linux/ima.h6
-rw-r--r--include/linux/intel-svm.h16
-rw-r--r--include/linux/iomap.h27
-rw-r--r--include/linux/iommu.h41
-rw-r--r--include/linux/iommufd.h12
-rw-r--r--include/linux/ip.h21
-rw-r--r--include/linux/irqdomain.h29
-rw-r--r--include/linux/ism.h98
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/kasan.h36
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kexec.h3
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/kprobes.h2
-rw-r--r--include/linux/kvm_host.h19
-rw-r--r--include/linux/kvm_types.h2
-rw-r--r--include/linux/leds.h15
-rw-r--r--include/linux/libata.h61
-rw-r--r--include/linux/libnvdimm.h3
-rw-r--r--include/linux/list.h15
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/maple_tree.h19
-rw-r--r--include/linux/mcb.h5
-rw-r--r--include/linux/mdio-bitbang.h6
-rw-r--r--include/linux/mdio.h150
-rw-r--r--include/linux/mdio/mdio-mscc-miim.h2
-rw-r--r--include/linux/memcontrol.h77
-rw-r--r--include/linux/memregion.h2
-rw-r--r--include/linux/memremap.h2
-rw-r--r--include/linux/mfd/axp20x.h15
-rw-r--r--include/linux/mfd/core.h2
-rw-r--r--include/linux/mfd/intel-m10-bmc.h205
-rw-r--r--include/linux/mfd/intel_soc_pmic.h1
-rw-r--r--include/linux/mfd/ntxec.h2
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h6
-rw-r--r--include/linux/mfd/twl.h2
-rw-r--r--include/linux/mhi_ep.h4
-rw-r--r--include/linux/micrel_phy.h3
-rw-r--r--include/linux/migrate.h30
-rw-r--r--include/linux/mlx4/qp.h2
-rw-r--r--include/linux/mlx5/device.h6
-rw-r--r--include/linux/mlx5/driver.h25
-rw-r--r--include/linux/mlx5/fs.h5
-rw-r--r--include/linux/mlx5/mlx5_ifc.h319
-rw-r--r--include/linux/mlx5/qp.h2
-rw-r--r--include/linux/mm.h542
-rw-r--r--include/linux/mm_inline.h19
-rw-r--r--include/linux/mm_types.h186
-rw-r--r--include/linux/mman.h34
-rw-r--r--include/linux/mmc/host.h1
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmc/slot-gpio.h1
-rw-r--r--include/linux/mmu_notifier.h13
-rw-r--r--include/linux/mmzone.h150
-rw-r--r--include/linux/module.h14
-rw-r--r--include/linux/moduleloader.h17
-rw-r--r--include/linux/mount.h1
-rw-r--r--include/linux/msi.h17
-rw-r--r--include/linux/mtd/rawnand.h21
-rw-r--r--include/linux/mtd/spinand.h1
-rw-r--r--include/linux/mtd/ubi.h1
-rw-r--r--include/linux/namei.h2
-rw-r--r--include/linux/netdevice.h22
-rw-r--r--include/linux/netfilter.h8
-rw-r--r--include/linux/netfs.h8
-rw-r--r--include/linux/netlink.h14
-rw-r--r--include/linux/nfs_fs.h7
-rw-r--r--include/linux/nfs_page.h79
-rw-r--r--include/linux/nfs_ssc.h2
-rw-r--r--include/linux/nmi.h8
-rw-r--r--include/linux/nvmem-consumer.h10
-rw-r--r--include/linux/nvmem-provider.h31
-rw-r--r--include/linux/of.h55
-rw-r--r--include/linux/of_address.h2
-rw-r--r--include/linux/of_device.h4
-rw-r--r--include/linux/of_gpio.h102
-rw-r--r--include/linux/of_iommu.h8
-rw-r--r--include/linux/page-flags.h1
-rw-r--r--include/linux/page_ext.h20
-rw-r--r--include/linux/pagemap.h44
-rw-r--r--include/linux/pagevec.h13
-rw-r--r--include/linux/pagewalk.h11
-rw-r--r--include/linux/parport_pc.h3
-rw-r--r--include/linux/pata_parport.h111
-rw-r--r--include/linux/pci-epc.h10
-rw-r--r--include/linux/pci-epf.h19
-rw-r--r--include/linux/pci.h9
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/percpu_counter.h6
-rw-r--r--include/linux/perf/riscv_pmu.h5
-rw-r--r--include/linux/pgtable.h34
-rw-r--r--include/linux/phy.h122
-rw-r--r--include/linux/phy/phy.h16
-rw-r--r--include/linux/pid_namespace.h19
-rw-r--r--include/linux/pinctrl/devinfo.h15
-rw-r--r--include/linux/pinctrl/pinctrl.h20
-rw-r--r--include/linux/pipe_fs_i.h20
-rw-r--r--include/linux/platform_data/amd_xdma.h34
-rw-r--r--include/linux/platform_data/asoc-ux500-msp.h20
-rw-r--r--include/linux/platform_data/cros_ec_commands.h3
-rw-r--r--include/linux/platform_data/i2c-gpio.h9
-rw-r--r--include/linux/platform_data/pcf857x.h45
-rw-r--r--include/linux/platform_data/simplefb.h1
-rw-r--r--include/linux/platform_data/tsl2563.h9
-rw-r--r--include/linux/platform_device.h11
-rw-r--r--include/linux/pm_domain.h5
-rw-r--r--include/linux/poison.h3
-rw-r--r--include/linux/printk.h2
-rw-r--r--include/linux/property.h9
-rw-r--r--include/linux/ptp_classify.h73
-rw-r--r--include/linux/pwm.h14
-rw-r--r--include/linux/pwm_backlight.h1
-rw-r--r--include/linux/range.h5
-rw-r--r--include/linux/regmap.h19
-rw-r--r--include/linux/remoteproc/pruss.h83
-rw-r--r--include/linux/rmap.h13
-rw-r--r--include/linux/rpmsg/qcom_glink.h12
-rw-r--r--include/linux/sched/coredump.h6
-rw-r--r--include/linux/scmi_protocol.h5
-rw-r--r--include/linux/serial.h10
-rw-r--r--include/linux/serial_core.h9
-rw-r--r--include/linux/sh_intc.h5
-rw-r--r--include/linux/shmem_fs.h18
-rw-r--r--include/linux/skbuff.h49
-rw-r--r--include/linux/slab_def.h2
-rw-r--r--include/linux/slub_def.h2
-rw-r--r--include/linux/soc/apple/rtkit.h26
-rw-r--r--include/linux/soc/mediatek/infracfg.h121
-rw-r--r--include/linux/soc/mediatek/mtk-mmsys.h25
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h3
-rw-r--r--include/linux/soc/qcom/apr.h2
-rw-r--r--include/linux/soc/qcom/geni-se.h (renamed from include/linux/qcom-geni-se.h)3
-rw-r--r--include/linux/soc/qcom/pmic_glink.h32
-rw-r--r--include/linux/soc/qcom/smd-rpm.h1
-rw-r--r--include/linux/soundwire/sdw.h130
-rw-r--r--include/linux/soundwire/sdw_type.h2
-rw-r--r--include/linux/spi/altera.h4
-rw-r--r--include/linux/spi/at86rf230.h20
-rw-r--r--include/linux/spi/cc2520.h21
-rw-r--r--include/linux/spi/spi.h34
-rw-r--r--include/linux/spi/xilinx_spi.h1
-rw-r--r--include/linux/ssb/ssb.h2
-rw-r--r--include/linux/stackdepot.h152
-rw-r--r--include/linux/string.h1
-rw-r--r--include/linux/string_helpers.h5
-rw-r--r--include/linux/sunrpc/gss_krb5.h196
-rw-r--r--include/linux/sunrpc/gss_krb5_enctypes.h41
-rw-r--r--include/linux/sunrpc/msg_prot.h5
-rw-r--r--include/linux/sunrpc/svc.h133
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/xdr.h28
-rw-r--r--include/linux/surface_aggregator/device.h5
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/swapops.h6
-rw-r--r--include/linux/swiotlb.h5
-rw-r--r--include/linux/topology.h33
-rw-r--r--include/linux/trace.h12
-rw-r--r--include/linux/trace_seq.h5
-rw-r--r--include/linux/tracepoint.h4
-rw-r--r--include/linux/transport_class.h8
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/tty_ldisc.h4
-rw-r--r--include/linux/tty_port.h10
-rw-r--r--include/linux/u64_stats_sync.h12
-rw-r--r--include/linux/uacce.h12
-rw-r--r--include/linux/uio.h35
-rw-r--r--include/linux/usb.h27
-rw-r--r--include/linux/usb/composite.h12
-rw-r--r--include/linux/usb/gadget.h11
-rw-r--r--include/linux/usb/rzv2m_usb3drd.h20
-rw-r--r--include/linux/usb/tcpci.h7
-rw-r--r--include/linux/usb/tcpm.h8
-rw-r--r--include/linux/usb/uvc.h (renamed from include/media/v4l2-uvc.h)209
-rw-r--r--include/linux/usb/webusb.h80
-rw-r--r--include/linux/userfaultfd_k.h2
-rw-r--r--include/linux/util_macros.h2
-rw-r--r--include/linux/uuid.h19
-rw-r--r--include/linux/vdpa.h12
-rw-r--r--include/linux/vfio.h6
-rw-r--r--include/linux/virtio.h5
-rw-r--r--include/linux/virtio_config.h8
-rw-r--r--include/linux/virtio_ring.h16
-rw-r--r--include/linux/virtio_vsock.h129
-rw-r--r--include/linux/vmalloc.h1
-rw-r--r--include/linux/vringh.h2
-rw-r--r--include/linux/writeback.h16
-rw-r--r--include/linux/xarray.h3
-rw-r--r--include/media/davinci/ccdc_types.h30
-rw-r--r--include/media/drv-intf/saa7146.h472
-rw-r--r--include/media/drv-intf/saa7146_vv.h266
-rw-r--r--include/media/i2c/s5c73m3.h41
-rw-r--r--include/media/media-entity.h102
-rw-r--r--include/media/ov_16bit_addr_reg_helpers.h92
-rw-r--r--include/media/v4l2-subdev.h377
-rw-r--r--include/memory/renesas-rpc-if.h34
-rw-r--r--include/net/act_api.h2
-rw-r--r--include/net/bluetooth/hci.h4
-rw-r--r--include/net/bluetooth/mgmt.h2
-rw-r--r--include/net/cfg80211.h148
-rw-r--r--include/net/cfg802154.h78
-rw-r--r--include/net/checksum.h4
-rw-r--r--include/net/dcbnl.h18
-rw-r--r--include/net/devlink.h55
-rw-r--r--include/net/dropreason.h26
-rw-r--r--include/net/dsa.h11
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/flow.h5
-rw-r--r--include/net/flow_offload.h6
-rw-r--r--include/net/ieee802154_netdev.h52
-rw-r--r--include/net/inet_sock.h4
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/ip_vs.h1
-rw-r--r--include/net/ipv6.h3
-rw-r--r--include/net/mac80211.h81
-rw-r--r--include/net/ndisc.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h12
-rw-r--r--include/net/netfilter/nf_flow_table.h8
-rw-r--r--include/net/netfilter/nf_tables_core.h16
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h4
-rw-r--r--include/net/netlink.h3
-rw-r--r--include/net/netns/conntrack.h1
-rw-r--r--include/net/netns/core.h5
-rw-r--r--include/net/nl802154.h61
-rw-r--r--include/net/page_pool.h14
-rw-r--r--include/net/pkt_cls.h74
-rw-r--r--include/net/pkt_sched.h21
-rw-r--r--include/net/raw.h13
-rw-r--r--include/net/route.h3
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/structs.h1
-rw-r--r--include/net/smc.h24
-rw-r--r--include/net/sock.h35
-rw-r--r--include/net/tc_act/tc_connmark.h9
-rw-r--r--include/net/tc_act/tc_nat.h10
-rw-r--r--include/net/tc_act/tc_pedit.h81
-rw-r--r--include/net/tc_wrapper.h15
-rw-r--r--include/net/xdp.h36
-rw-r--r--include/net/xsk_buff_pool.h5
-rw-r--r--include/rdma/ib_sa.h2
-rw-r--r--include/rdma/ib_umem.h1
-rw-r--r--include/rdma/ib_verbs.h2
-rw-r--r--include/rdma/rdma_cm.h1
-rw-r--r--include/rdma/restrack.h4
-rw-r--r--include/scsi/libsas.h1
-rw-r--r--include/scsi/sas_ata.h20
-rw-r--r--include/scsi/scsi_device.h38
-rw-r--r--include/soc/mscc/ocelot.h64
-rw-r--r--include/soc/mscc/ocelot_dev.h23
-rw-r--r--include/soc/mscc/vsc7514_regs.h18
-rw-r--r--include/sound/ac97/codec.h2
-rw-r--r--include/sound/acp63_chip_offset_byte.h751
-rw-r--r--include/sound/core.h6
-rw-r--r--include/sound/cs42l42.h5
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/sound/hdaudio.h4
-rw-r--r--include/sound/simple_card_utils.h3
-rw-r--r--include/sound/soc-component.h2
-rw-r--r--include/sound/soc-dai.h90
-rw-r--r--include/sound/soc-dapm.h1
-rw-r--r--include/sound/soc-dpcm.h2
-rw-r--r--include/sound/soc-topology.h2
-rw-r--r--include/sound/soc.h6
-rw-r--r--include/sound/sof/ipc4/header.h3
-rw-r--r--include/trace/bpf_probe.h45
-rw-r--r--include/trace/events/bridge.h58
-rw-r--r--include/trace/events/cma.h32
-rw-r--r--include/trace/events/cxl.h112
-rw-r--r--include/trace/events/devlink.h2
-rw-r--r--include/trace/events/f2fs.h104
-rw-r--r--include/trace/events/habanalabs.h75
-rw-r--r--include/trace/events/iommu.h7
-rw-r--r--include/trace/events/mmflags.h1
-rw-r--r--include/trace/events/rpcgss.h22
-rw-r--r--include/trace/events/rxrpc.h492
-rw-r--r--include/trace/events/scmi.h18
-rw-r--r--include/trace/events/skb.h10
-rw-r--r--include/trace/events/sock.h69
-rw-r--r--include/trace/events/sunrpc.h28
-rw-r--r--include/trace/perf.h46
-rw-r--r--include/trace/stages/stage3_trace_output.h3
-rw-r--r--include/trace/stages/stage6_event_callback.h3
-rw-r--r--include/trace/stages/stage7_class_define.h1
-rw-r--r--include/uapi/drm/amdgpu_drm.h12
-rw-r--r--include/uapi/drm/drm_fourcc.h12
-rw-r--r--include/uapi/drm/habanalabs_accel.h (renamed from include/uapi/misc/habanalabs.h)33
-rw-r--r--include/uapi/drm/i810_drm.h292
-rw-r--r--include/uapi/drm/ivpu_accel.h306
-rw-r--r--include/uapi/drm/mga_drm.h429
-rw-r--r--include/uapi/drm/msm_drm.h4
-rw-r--r--include/uapi/drm/r128_drm.h336
-rw-r--r--include/uapi/drm/savage_drm.h220
-rw-r--r--include/uapi/drm/sis_drm.h77
-rw-r--r--include/uapi/drm/via_drm.h282
-rw-r--r--include/uapi/linux/android/binder.h7
-rw-r--r--include/uapi/linux/batadv_packet.h2
-rw-r--r--include/uapi/linux/bpf.h35
-rw-r--r--include/uapi/linux/cxl_mem.h30
-rw-r--r--include/uapi/linux/dcbnl.h2
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/ethtool.h48
-rw-r--r--include/uapi/linux/ethtool_netlink.h79
-rw-r--r--include/uapi/linux/fcntl.h1
-rw-r--r--include/uapi/linux/fou.h54
-rw-r--r--include/uapi/linux/fuse.h45
-rw-r--r--include/uapi/linux/gsmmux.h17
-rw-r--r--include/uapi/linux/if_bridge.h2
-rw-r--r--include/uapi/linux/if_link.h5
-rw-r--r--include/uapi/linux/if_packet.h1
-rw-r--r--include/uapi/linux/in.h1
-rw-r--r--include/uapi/linux/ioam6.h2
-rw-r--r--include/uapi/linux/kd.h10
-rw-r--r--include/uapi/linux/kvm.h9
-rw-r--r--include/uapi/linux/mdio.h8
-rw-r--r--include/uapi/linux/media-bus-format.h5
-rw-r--r--include/uapi/linux/memfd.h4
-rw-r--r--include/uapi/linux/meye.h65
-rw-r--r--include/uapi/linux/netdev.h59
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h14
-rw-r--r--include/uapi/linux/nl80211.h36
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/prctl.h6
-rw-r--r--include/uapi/linux/rpl.h4
-rw-r--r--include/uapi/linux/rtnetlink.h1
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/serial_reg.h5
-rw-r--r--include/uapi/linux/snmp.h3
-rw-r--r--include/uapi/linux/usb/ch9.h16
-rw-r--r--include/uapi/linux/usb/video.h30
-rw-r--r--include/uapi/linux/uuid.h12
-rw-r--r--include/uapi/linux/uvcvideo.h6
-rw-r--r--include/uapi/linux/v4l2-controls.h8
-rw-r--r--include/uapi/linux/v4l2-subdev.h74
-rw-r--r--include/uapi/linux/vfio.h15
-rw-r--r--include/uapi/linux/vhost.h8
-rw-r--r--include/uapi/linux/vhost_types.h2
-rw-r--r--include/uapi/linux/videodev2.h11
-rw-r--r--include/uapi/linux/virtio_blk.h105
-rw-r--r--include/uapi/rdma/hns-abi.h4
-rw-r--r--include/uapi/scsi/scsi_bsg_fc.h2
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h48
-rw-r--r--include/uapi/sound/firewire.h26
-rw-r--r--include/uapi/sound/intel/avs/tokens.h4
-rw-r--r--include/ufs/ufs.h37
-rw-r--r--include/ufs/ufshcd.h194
-rw-r--r--include/ufs/ufshci.h76
-rw-r--r--include/ufs/unipro.h1
-rw-r--r--include/xen/xenbus.h7
523 files changed, 12145 insertions, 6545 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index e44be31115a6..0584e9f6e339 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -534,6 +534,7 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p);
int acpi_device_update_power(struct acpi_device *device, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
void acpi_dev_power_up_children_with_adr(struct acpi_device *adev);
+u8 acpi_dev_power_state_for_wake(struct acpi_device *adev);
int acpi_device_power_add_dependent(struct acpi_device *adev,
struct device *dev);
void acpi_device_power_remove_dependent(struct acpi_device *adev,
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index cdedccccc4f3..81b9e794424d 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -804,7 +804,7 @@ struct acpi_dmar_pci_path {
struct acpi_dmar_hardware_unit {
struct acpi_dmar_header header;
u8 flags;
- u8 reserved;
+ u8 size; /* Size of the register set */
u16 segment;
u64 address; /* Register Base Address */
};
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index c13f46109e88..46a0016efd81 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -2,7 +2,7 @@
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+static inline const struct dma_map_ops *get_arch_dma_ops(void)
{
return NULL;
}
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index fbca56bd9cbc..b05253f68eaa 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -4,7 +4,6 @@
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
enum {
- EI_ETYPE_NONE, /* Dummy value for undefined case */
EI_ETYPE_NULL, /* Return NULL if failure */
EI_ETYPE_ERRNO, /* Return -ERRNO if failure */
EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */
@@ -20,8 +19,10 @@ struct pt_regs;
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
/*
- * Whitelist generating macro. Specify functions which can be
- * error-injectable using this macro.
+ * Whitelist generating macro. Specify functions which can be error-injectable
+ * using this macro. If you are unsure what is required for error-injectable
+ * functions, please read the 'Error Injectable Functions' section in
+ * Documentation/fault-injection/fault-injection.rst.
*/
#define ALLOW_ERROR_INJECTION(fname, _etype) \
static struct error_injection_entry __used \
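For readers following the new cross-reference, a minimal sketch of how a function is whitelisted with this macro; the function name is hypothetical, and the sketch assumes it returns 0 or a negative errno so that the ERRNO type matches its return convention:

#include <linux/error-injection.h>

/* Hypothetical helper: returns 0 on success or a negative errno. */
int mydrv_hw_init(void)
{
	return 0;
}
ALLOW_ERROR_INJECTION(mydrv_hw_init, ERRNO);

With CONFIG_FAIL_FUNCTION enabled, the whitelisted function is then listed in /sys/kernel/debug/fail_function/injectable and can be forced to fail with a chosen errno at runtime.
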
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index a7752cf152ce..22cb8c9efc1d 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -31,12 +31,6 @@ struct module;
struct device_node;
struct gpio_desc;
-/* caller holds gpio_lock *OR* gpio is marked as requested */
-static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
-{
- return gpiod_to_chip(gpio_to_desc(gpio));
-}
-
/* Always use the library code for GPIO management calls,
* or when sleeping may be involved.
*/
@@ -103,12 +97,6 @@ static inline int gpio_export(unsigned gpio, bool direction_may_change)
return gpiod_export(gpio_to_desc(gpio), direction_may_change);
}
-static inline int gpio_export_link(struct device *dev, const char *name,
- unsigned gpio)
-{
- return gpiod_export_link(dev, name, gpio_to_desc(gpio));
-}
-
static inline void gpio_unexport(unsigned gpio)
{
gpiod_unexport(gpio_to_desc(gpio));
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index a57d667addd2..d7f6335d3999 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -25,6 +25,13 @@ static inline pte_t huge_pte_mkwrite(pte_t pte)
return pte_mkwrite(pte);
}
+#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+#endif
+
static inline pte_t huge_pte_mkdirty(pte_t pte)
{
return pte_mkdirty(pte);
@@ -37,7 +44,7 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
{
- return pte_mkuffd_wp(pte);
+ return huge_pte_wrprotect(pte_mkuffd_wp(pte));
}
static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
@@ -104,13 +111,6 @@ static inline int huge_pte_none_mostly(pte_t pte)
return huge_pte_none(pte) || is_pte_marker(pte);
}
-#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
-static inline pte_t huge_pte_wrprotect(pte_t pte)
-{
- return pte_wrprotect(pte);
-}
-#endif
-
#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index a2c8ed60233a..6796abe1900e 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -19,6 +19,18 @@
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
+#ifndef pfn_valid
+static inline int pfn_valid(unsigned long pfn)
+{
+ /* avoid <linux/mm.h> include hell */
+ extern unsigned long max_mapnr;
+ unsigned long pfn_offset = ARCH_PFN_OFFSET;
+
+ return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
+}
+#define pfn_valid pfn_valid
+#endif
+
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
/* memmap is virtually contiguous. */
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index 6fc47561814c..c0be2edeb484 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -84,8 +84,6 @@ extern unsigned long memory_end;
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#endif
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
-
#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
((void *)(kaddr) < (void *)memory_end))
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 14db3bee0519..4a2b7e6e0c1f 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -27,15 +27,12 @@
*
* For example: authenc(hmac(sha256), cbc(aes))
*
- * The example code provided for the symmetric key cipher operation
- * applies here as well. Naturally all *skcipher* symbols must be exchanged
- * the *aead* pendants discussed in the following. In addition, for the AEAD
- * operation, the aead_request_set_ad function must be used to set the
- * pointer to the associated data memory location before performing the
- * encryption or decryption operation. In case of an encryption, the associated
- * data memory is filled during the encryption operation. For decryption, the
- * associated data memory must contain data that is used to verify the integrity
- * of the decrypted data. Another deviation from the asynchronous block cipher
+ * The example code provided for the symmetric key cipher operation applies
+ * here as well. Naturally all *skcipher* symbols must be exchanged for the *aead*
+ * pendants discussed in the following. In addition, for the AEAD operation,
+ * the aead_request_set_ad function must be used to set the pointer to the
+ * associated data memory location before performing the encryption or
+ * decryption operation. Another deviation from the asynchronous block cipher
* operation is that the caller should explicitly check for -EBADMSG of the
* crypto_aead_decrypt. That error indicates an authentication error, i.e.
* a breach in the integrity of the message. In essence, that -EBADMSG error
@@ -49,7 +46,10 @@
*
* The destination scatterlist has the same layout, except that the plaintext
* (resp. ciphertext) will grow (resp. shrink) by the authentication tag size
- * during encryption (resp. decryption).
+ * during encryption (resp. decryption). The authentication tag is generated
+ * during the encryption operation and appended to the ciphertext. During
+ * decryption, the authentication tag is consumed along with the ciphertext and
+ * used to verify the integrity of the plaintext and the associated data.
*
* In-place encryption/decryption is enabled by using the same scatterlist
* pointer for both the source and destination.
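As a reference for the flow the updated comment describes, a minimal, hedged sketch for a synchronous caller using gcm(aes); all names and the fixed 16-byte tag are illustrative, buf is expected to hold the associated data, the plaintext, and room for the tag in that order, and iv must be crypto_aead_ivsize(tfm) bytes:

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_gcm_encrypt(u8 *buf, unsigned int assoclen,
			       unsigned int ptext_len,
			       const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/*
	 * In-place operation: one scatterlist as both source and
	 * destination, covering AD || plaintext || room for the tag.
	 */
	sg_init_one(&sg, buf, assoclen + ptext_len + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptext_len, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}

The decryption path is symmetric: cryptlen then covers ciphertext plus tag, and a -EBADMSG return from crypto_aead_decrypt() signals the authentication failure discussed above.
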
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 61b327206b55..fede394ae2ab 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -302,4 +302,10 @@ enum {
CRYPTO_MSG_ALG_LOADED,
};
+static inline void crypto_request_complete(struct crypto_async_request *req,
+ int err)
+{
+ req->complete(req->data, err);
+}
+
#endif /* _CRYPTO_ALGAPI_H */
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index a5db86670bdf..7e76623f9ec3 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -21,8 +21,6 @@
#define ALG_MAX_PAGES 16
-struct crypto_async_request;
-
struct alg_sock {
/* struct sock must be the first member of struct alg_sock */
struct sock sk;
@@ -235,7 +233,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
-void af_alg_async_cb(struct crypto_async_request *_req, int err);
+void af_alg_async_cb(void *data, int err);
__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 49339003bd2c..978b57a3f4f0 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -28,7 +28,7 @@ static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
static inline void acomp_request_complete(struct acomp_req *req,
int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index cd8cb1e921b7..28a95eb3182d 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -82,7 +82,7 @@ static inline void *aead_request_ctx_dma(struct aead_request *req)
static inline void aead_request_complete(struct aead_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline u32 aead_request_flags(struct aead_request *req)
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index aaf1092b93b8..a0fba4b2eccf 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -69,7 +69,7 @@ static inline void *akcipher_tfm_ctx_dma(struct crypto_akcipher *tfm)
static inline void akcipher_request_complete(struct akcipher_request *req,
int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm)
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 1a2a41b79253..0b259dbb97af 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -199,7 +199,7 @@ static inline void *ahash_request_ctx_dma(struct ahash_request *req)
static inline void ahash_request_complete(struct ahash_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline u32 ahash_request_flags(struct ahash_request *req)
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
index 3c9726e89f53..0a6db8c4a9a0 100644
--- a/include/crypto/internal/kpp.h
+++ b/include/crypto/internal/kpp.h
@@ -85,7 +85,7 @@ static inline void *kpp_tfm_ctx_dma(struct crypto_kpp *tfm)
static inline void kpp_request_complete(struct kpp_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 06d0a5491cf3..fb3d9e899f52 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -94,7 +94,7 @@ static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
static inline void skcipher_request_complete(struct skcipher_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index f2c42b4111b1..32fc4473175b 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -53,7 +53,7 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk)
static inline void scatterwalk_unmap(void *vaddr)
{
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
}
static inline void scatterwalk_start(struct scatter_walk *walk,
@@ -65,7 +65,7 @@ static inline void scatterwalk_start(struct scatter_walk *walk,
static inline void *scatterwalk_map(struct scatter_walk *walk)
{
- return kmap_atomic(scatterwalk_page(walk)) +
+ return kmap_local_page(scatterwalk_page(walk)) +
offset_in_page(walk->offset);
}
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 0f8dba69feb4..15b16c4853d8 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -8,8 +8,8 @@
#define XTS_BLOCK_SIZE 16
-static inline int xts_check_key(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
+static inline int xts_verify_key(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
/*
* key consists of keys of equal size concatenated, therefore
@@ -18,24 +18,17 @@ static inline int xts_check_key(struct crypto_tfm *tfm,
if (keylen % 2)
return -EINVAL;
- /* ensure that the AES and tweak key are not identical */
- if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2))
- return -EINVAL;
-
- return 0;
-}
-
-static inline int xts_verify_key(struct crypto_skcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
/*
- * key consists of keys of equal size concatenated, therefore
- * the length must be even.
+ * In FIPS mode only a combined key length of either 256 or
+ * 512 bits is allowed, c.f. FIPS 140-3 IG C.I.
*/
- if (keylen % 2)
+ if (fips_enabled && keylen != 32 && keylen != 64)
return -EINVAL;
- /* ensure that the AES and tweak key are not identical */
+ /*
+ * Ensure that the AES and tweak key are not identical when
+ * in FIPS mode or the FORBID_WEAK_KEYS flag is set.
+ */
if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
!crypto_memneq(key, key + (keylen / 2), keylen / 2))
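A hedged sketch of how a setkey implementation typically consumes xts_verify_key() after this change (driver name and context are made up); note that in FIPS mode the new length check limits the combined key to 32 or 64 bytes, i.e. AES-128 or AES-256 in XTS mode:

#include <crypto/skcipher.h>
#include <crypto/xts.h>

static int mydrv_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	/*
	 * key now holds two equal-size halves, the data key followed by
	 * the tweak key; programming them into the engine is omitted.
	 */
	return 0;
}
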
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index e934aab357be..632376c291db 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -240,6 +240,8 @@
#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */
# define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0)
# define DP_DSC_PASSTHROUGH_IS_SUPPORTED (1 << 1)
+# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_COMP_TO_COMP (1 << 2)
+# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_UNCOMP_TO_COMP (1 << 3)
#define DP_DSC_REV 0x061
# define DP_DSC_MAJOR_MASK (0xf << 0)
@@ -278,12 +280,15 @@
#define DP_DSC_BLK_PREDICTION_SUPPORT 0x066
# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0)
+# define DP_DSC_RGB_COLOR_CONV_BYPASS_SUPPORT (1 << 1)
#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */
#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
+# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK 0x06
+# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY 0x08
#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
# define DP_DSC_RGB (1 << 0)
@@ -345,11 +350,13 @@
# define DP_DSC_24_PER_DP_DSC_SINK (1 << 2)
#define DP_DSC_BITS_PER_PIXEL_INC 0x06F
+# define DP_DSC_RGB_YCbCr444_MAX_BPP_DELTA_MASK 0x1f
+# define DP_DSC_RGB_YCbCr420_MAX_BPP_DELTA_MASK 0xe0
# define DP_DSC_BITS_PER_PIXEL_1_16 0x0
# define DP_DSC_BITS_PER_PIXEL_1_8 0x1
# define DP_DSC_BITS_PER_PIXEL_1_4 0x2
# define DP_DSC_BITS_PER_PIXEL_1_2 0x3
-# define DP_DSC_BITS_PER_PIXEL_1 0x4
+# define DP_DSC_BITS_PER_PIXEL_1_1 0x4
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
@@ -603,6 +610,7 @@
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
+# define DP_FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE (1 << 6)
# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
@@ -1105,6 +1113,11 @@
# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
+#define DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 0x2214 /* 2.0 E11 */
+# define DP_ADAPTIVE_SYNC_SDP_SUPPORTED (1 << 0)
+# define DP_AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED (1 << 1)
+# define DP_VSC_EXT_SDP_FRAMEWORK_VERSION_1_SUPPORTED (1 << 4)
+
#define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */
# define DP_UHBR10 (1 << 0)
# define DP_UHBR20 (1 << 1)
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index 41fd8352ab65..32c764fb9cb5 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -841,7 +841,8 @@ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_atomic_payload *payload);
void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state,
- struct drm_dp_mst_atomic_payload *payload);
+ const struct drm_dp_mst_atomic_payload *old_payload,
+ struct drm_dp_mst_atomic_payload *new_payload);
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
@@ -867,6 +868,9 @@ struct drm_dp_mst_topology_state *
drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *
+drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *
drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_atomic_payload *
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
index 65c0affbd306..d4955062c77e 100644
--- a/include/drm/drm_accel.h
+++ b/include/drm/drm_accel.h
@@ -27,7 +27,8 @@
.compat_ioctl = drm_compat_ioctl,\
.poll = drm_poll,\
.read = drm_read,\
- .llseek = noop_llseek
+ .llseek = noop_llseek, \
+ .mmap = drm_gem_mmap
/**
* DEFINE_DRM_ACCEL_FOPS() - macro to generate file operations for accelerator drivers
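Drivers normally pick this up through the DEFINE_DRM_ACCEL_FOPS() wrapper, so the added .mmap = drm_gem_mmap lands in every accel driver's file_operations; an illustrative use with a made-up name:

/*
 * Expands to a complete static struct file_operations for an accel
 * driver, now including .mmap = drm_gem_mmap.
 */
DEFINE_DRM_ACCEL_FOPS(myaccel_driver_fops);
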
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 10b1990bc1f6..92586ab55ef5 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -515,17 +515,17 @@ struct drm_private_state * __must_check
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_private_state *
-drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_private_state *
-drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_connector *
-drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
struct drm_connector *
-drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
/**
@@ -540,7 +540,7 @@ drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
* @drm_atomic_get_new_crtc_state should be used instead.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_existing_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].state;
@@ -555,7 +555,7 @@ drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
* NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_old_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].old_state;
@@ -569,7 +569,7 @@ drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
* NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_new_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].new_state;
@@ -587,7 +587,7 @@ drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
* @drm_atomic_get_new_plane_state should be used instead.
*/
static inline struct drm_plane_state *
-drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_existing_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].state;
@@ -602,7 +602,7 @@ drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
* NULL if the plane is not part of the global atomic state.
*/
static inline struct drm_plane_state *
-drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_old_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].old_state;
@@ -617,7 +617,7 @@ drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
* NULL if the plane is not part of the global atomic state.
*/
static inline struct drm_plane_state *
-drm_atomic_get_new_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_new_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].new_state;
@@ -635,7 +635,7 @@ drm_atomic_get_new_plane_state(struct drm_atomic_state *state,
* @drm_atomic_get_new_connector_state should be used instead.
*/
static inline struct drm_connector_state *
-drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_existing_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -655,7 +655,7 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
* or NULL if the connector is not part of the global atomic state.
*/
static inline struct drm_connector_state *
-drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -675,7 +675,7 @@ drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
* or NULL if the connector is not part of the global atomic state.
*/
static inline struct drm_connector_state *
-drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -713,7 +713,7 @@ drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
* Read-only pointer to the current plane state.
*/
static inline const struct drm_plane_state *
-__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
+__drm_atomic_get_current_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
if (state->planes[drm_plane_index(plane)].state)
@@ -1134,10 +1134,10 @@ struct drm_bridge_state *
drm_atomic_get_bridge_state(struct drm_atomic_state *state,
struct drm_bridge *bridge);
struct drm_bridge_state *
-drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge);
struct drm_bridge_state *
-drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge);
#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index 192766656b88..b9740edb2658 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -26,6 +26,7 @@
#include <linux/types.h>
+struct drm_atomic_state;
struct drm_bridge;
struct drm_bridge_state;
struct drm_crtc;
@@ -70,6 +71,9 @@ void __drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_
void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector);
+int drm_atomic_helper_connector_tv_check(struct drm_connector *connector,
+ struct drm_atomic_state *state);
void drm_atomic_helper_connector_tv_margins_reset(struct drm_connector *connector);
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index 0d36bfd1a4cd..5a4cd1fa8e2a 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -4,6 +4,9 @@
#ifndef _DRM_AUDIO_COMPONENT_H_
#define _DRM_AUDIO_COMPONENT_H_
+#include <linux/completion.h>
+#include <linux/types.h>
+
struct drm_audio_component;
struct device;
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 6b65b0dfb4fb..42f86327b40a 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -297,12 +297,6 @@ struct drm_bridge_funcs {
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_pre_enable. It would be prudent to also provide an
- * implementation of @pre_enable if you are expecting driver calls into
- * &drm_bridge_chain_pre_enable.
- *
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
@@ -323,11 +317,6 @@ struct drm_bridge_funcs {
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_chain_enable.
- * It would be prudent to also provide an implementation of @enable if
- * you are expecting driver calls into &drm_bridge_chain_enable.
- *
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
@@ -345,12 +334,6 @@ struct drm_bridge_funcs {
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_disable. It would be prudent to also provide an
- * implementation of @disable if you are expecting driver calls into
- * &drm_bridge_chain_disable.
- *
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
@@ -370,13 +353,6 @@ struct drm_bridge_funcs {
* signals) feeding it is no longer running when this callback is
* called.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_post_disable.
- * It would be prudent to also provide an implementation of
- * @post_disable if you are expecting driver calls into
- * &drm_bridge_chain_post_disable.
- *
* The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
@@ -769,6 +745,14 @@ struct drm_bridge {
*/
bool interlace_allowed;
/**
+ * @pre_enable_prev_first: The bridge requires that the prev
+ * bridge @pre_enable function is called before its @pre_enable,
+ * and conversely for post_disable. This is most frequently a
+ * requirement for DSI devices which need the host to be initialised
+ * before the peripheral.
+ */
+ bool pre_enable_prev_first;
+ /**
* @ddc: Associated I2C adapter for DDC access, if any.
*/
struct i2c_adapter *ddc;
@@ -876,13 +860,9 @@ enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
-void drm_bridge_chain_disable(struct drm_bridge *bridge);
-void drm_bridge_chain_post_disable(struct drm_bridge *bridge);
void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode);
-void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_chain_enable(struct drm_bridge *bridge);
int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
struct drm_crtc_state *crtc_state,
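The new pre_enable_prev_first flag is meant to be set once, typically at probe time by a DSI peripheral bridge; a hedged sketch with made-up driver names:

#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <linux/slab.h>

struct mydsi {
	struct drm_bridge bridge;
};

static const struct drm_bridge_funcs mydsi_bridge_funcs = {
	/* atomic_pre_enable/atomic_enable/... omitted for brevity */
};

static int mydsi_probe(struct mipi_dsi_device *dsi)
{
	struct mydsi *ctx;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->bridge.funcs = &mydsi_bridge_funcs;
	ctx->bridge.of_node = dsi->dev.of_node;
	/* The DSI host (the previous bridge) must be pre-enabled first. */
	ctx->bridge.pre_enable_prev_first = true;
	drm_bridge_add(&ctx->bridge);

	return 0;
}

With the flag set, the atomic helpers run the previous bridge's @pre_enable before this bridge's, and reverse the relative order again around @post_disable, as the new field documentation states.
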
diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h
index 33f6c3bbdb4a..69630815fb09 100644
--- a/include/drm/drm_bridge_connector.h
+++ b/include/drm/drm_bridge_connector.h
@@ -10,8 +10,6 @@ struct drm_connector;
struct drm_device;
struct drm_encoder;
-void drm_bridge_connector_enable_hpd(struct drm_connector *connector);
-void drm_bridge_connector_disable_hpd(struct drm_connector *connector);
struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_encoder *encoder);
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 1220d185c776..c0a14b40c039 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -106,6 +106,14 @@ struct drm_client_dev {
* @modesets: CRTC configurations
*/
struct drm_mode_set *modesets;
+
+ /**
+ * @hotplug_failed:
+ *
+ * Set by client hotplug helpers if the hotplugging failed
+ * before. It is usually not tried again.
+ */
+ bool hotplug_failed;
};
int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 565cf9d3c550..7b5048516185 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -144,6 +144,65 @@ enum subpixel_order {
};
/**
+ * enum drm_connector_tv_mode - Analog TV output mode
+ *
+ * This enum is used to indicate the TV output mode used on an analog TV
+ * connector.
+ *
+ * WARNING: The values of this enum are uABI since they're exposed in the
+ * "TV mode" connector property.
+ */
+enum drm_connector_tv_mode {
+ /**
+ * @DRM_MODE_TV_MODE_NTSC: CCIR System M (aka 525-lines)
+ * together with the NTSC Color Encoding.
+ */
+ DRM_MODE_TV_MODE_NTSC,
+
+ /**
+ * @DRM_MODE_TV_MODE_NTSC_443: Variant of
+ * @DRM_MODE_TV_MODE_NTSC. Uses a color subcarrier frequency
+ * of 4.43 MHz.
+ */
+ DRM_MODE_TV_MODE_NTSC_443,
+
+ /**
+ * @DRM_MODE_TV_MODE_NTSC_J: Variant of @DRM_MODE_TV_MODE_NTSC
+ * used in Japan. Uses a black level equals to the blanking
+ * level.
+ */
+ DRM_MODE_TV_MODE_NTSC_J,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL: CCIR System B together with the PAL
+ * color system.
+ */
+ DRM_MODE_TV_MODE_PAL,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL_M: CCIR System M (aka 525-lines)
+ * together with the PAL color encoding
+ */
+ DRM_MODE_TV_MODE_PAL_M,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL_N: CCIR System N together with the PAL
+ * color encoding. It uses 625 lines, but has a color subcarrier
+ * frequency of 3.58MHz, the SECAM color space, and narrower
+ * channels compared to most of the other PAL variants.
+ */
+ DRM_MODE_TV_MODE_PAL_N,
+
+ /**
+ * @DRM_MODE_TV_MODE_SECAM: CCIR System B together with the
+ * SECAM color system.
+ */
+ DRM_MODE_TV_MODE_SECAM,
+
+ DRM_MODE_TV_MODE_MAX,
+};
+
+/**
* struct drm_scrambling: sink's scrambling support.
*/
struct drm_scrambling {
@@ -245,9 +304,6 @@ struct drm_hdmi_info {
*/
unsigned long y420_cmdb_modes[BITS_TO_LONGS(256)];
- /** @y420_cmdb_map: bitmap of SVD index, to extraxt vcb modes */
- u64 y420_cmdb_map;
-
/** @y420_dc_modes: bitmap of deep color support index */
u8 y420_dc_modes;
@@ -662,6 +718,21 @@ struct drm_display_info {
* monitor's default value is used instead.
*/
u32 max_dsc_bpp;
+
+ /**
+ * @vics: Array of vics_len VICs. Internal to EDID parsing.
+ */
+ u8 *vics;
+
+ /**
+ * @vics_len: Number of elements in vics. Internal to EDID parsing.
+ */
+ int vics_len;
+
+ /**
+ * @quirks: EDID based quirks. Internal to EDID parsing.
+ */
+ u32 quirks;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -701,6 +772,7 @@ struct drm_connector_tv_margins {
* @select_subconnector: selected subconnector
* @subconnector: detected subconnector
* @margins: TV margins
+ * @legacy_mode: Legacy TV mode, driver specific value
* @mode: TV mode
* @brightness: brightness in percent
* @contrast: contrast in percent
@@ -713,6 +785,7 @@ struct drm_tv_connector_state {
enum drm_mode_subconnector select_subconnector;
enum drm_mode_subconnector subconnector;
struct drm_connector_tv_margins margins;
+ unsigned int legacy_mode;
unsigned int mode;
unsigned int brightness;
unsigned int contrast;
@@ -1313,6 +1386,18 @@ struct drm_cmdline_mode {
* @tv_margins: TV margins to apply to the mode.
*/
struct drm_connector_tv_margins tv_margins;
+
+ /**
+ * @tv_mode: TV mode standard. See DRM_MODE_TV_MODE_*.
+ */
+ enum drm_connector_tv_mode tv_mode;
+
+ /**
+ * @tv_mode_specified:
+ *
+ * Did the mode have a preferred TV mode?
+ */
+ bool tv_mode_specified;
};
/**
@@ -1810,19 +1895,24 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order);
const char *drm_get_dpms_name(int val);
const char *drm_get_dvi_i_subconnector_name(int val);
const char *drm_get_dvi_i_select_name(int val);
+const char *drm_get_tv_mode_name(int val);
const char *drm_get_tv_subconnector_name(int val);
const char *drm_get_tv_select_name(int val);
const char *drm_get_dp_subconnector_name(int val);
const char *drm_get_content_protection_name(int val);
const char *drm_get_hdcp_content_type_name(int val);
+int drm_get_tv_mode_from_name(const char *name, size_t len);
+
int drm_mode_create_dvi_i_properties(struct drm_device *dev);
void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector);
int drm_mode_create_tv_margin_properties(struct drm_device *dev);
+int drm_mode_create_tv_properties_legacy(struct drm_device *dev,
+ unsigned int num_modes,
+ const char * const modes[]);
int drm_mode_create_tv_properties(struct drm_device *dev,
- unsigned int num_modes,
- const char * const modes[]);
+ unsigned int supported_tv_modes);
void drm_connector_attach_tv_margin_properties(struct drm_connector *conn);
int drm_mode_create_scaling_mode_property(struct drm_device *dev);
int drm_connector_attach_content_type_property(struct drm_connector *dev);
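With the reworked prototype, supported_tv_modes is a bitmask of the new enum values; a hedged sketch of a connector init path (driver names are made up, and the tv_mode_property field on drm_mode_config is assumed from the rest of this series rather than shown in this hunk):

#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_mode_object.h>
#include <linux/bits.h>

static int mytv_connector_init(struct drm_device *dev,
			       struct drm_connector *connector)
{
	int ret;

	/* Advertise only the analog standards this encoder can output. */
	ret = drm_mode_create_tv_properties(dev,
					    BIT(DRM_MODE_TV_MODE_NTSC) |
					    BIT(DRM_MODE_TV_MODE_PAL));
	if (ret)
		return ret;

	/* tv_mode_property is assumed to be created by the call above. */
	drm_object_attach_property(&connector->base,
				   dev->mode_config.tv_mode_property,
				   DRM_MODE_TV_MODE_NTSC);
	return 0;
}
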
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 1840db247f69..8c886fc46ef2 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -33,15 +33,17 @@
#ifndef __DRM_CRTC_HELPER_H__
#define __DRM_CRTC_HELPER_H__
-#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/idr.h>
-#include <linux/fb.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_modeset_helper.h>
+struct drm_atomic_state;
+struct drm_connector;
+struct drm_crtc;
+struct drm_device;
+struct drm_display_mode;
+struct drm_encoder;
+struct drm_framebuffer;
+struct drm_mode_set;
+struct drm_modeset_acquire_ctx;
void drm_helper_disable_unused_functions(struct drm_device *dev);
int drm_crtc_helper_set_config(struct drm_mode_set *set,
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index 2188dc83957f..7616f457ce70 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -79,12 +79,61 @@ struct drm_info_node {
struct dentry *dent;
};
+/**
+ * struct drm_debugfs_info - debugfs info list entry
+ *
+ * This structure represents a debugfs file to be created by the drm
+ * core.
+ */
+struct drm_debugfs_info {
+ /** @name: File name */
+ const char *name;
+
+ /**
+ * @show:
+ *
+ * Show callback. &seq_file->private will be set to the &struct
+ * drm_debugfs_entry corresponding to the instance of this info
+ * on a given &struct drm_device.
+ */
+ int (*show)(struct seq_file*, void*);
+
+ /** @driver_features: Required driver features for this entry. */
+ u32 driver_features;
+
+ /** @data: Driver-private data, should not be device-specific. */
+ void *data;
+};
+
+/**
+ * struct drm_debugfs_entry - Per-device debugfs node structure
+ *
+ * This structure represents a debugfs file, as an instantiation of a &struct
+ * drm_debugfs_info on a &struct drm_device.
+ */
+struct drm_debugfs_entry {
+ /** @dev: &struct drm_device for this node. */
+ struct drm_device *dev;
+
+ /** @file: Template for this node. */
+ struct drm_debugfs_info file;
+
+ /** @list: Linked list of all device nodes. */
+ struct list_head list;
+};
+
#if defined(CONFIG_DEBUG_FS)
void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
struct drm_minor *minor);
int drm_debugfs_remove_files(const struct drm_info_list *files,
int count, struct drm_minor *minor);
+
+void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*), void *data);
+
+void drm_debugfs_add_files(struct drm_device *dev,
+ const struct drm_debugfs_info *files, int count);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -96,6 +145,16 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
{
return 0;
}
+
+static inline void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*),
+ void *data)
+{}
+
+static inline void drm_debugfs_add_files(struct drm_device *dev,
+ const struct drm_debugfs_info *files,
+ int count)
+{}
#endif
#endif /* _DRM_DEBUGFS_H_ */
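
A minimal sketch of the device-centric debugfs registration introduced here
(the foo_* names are hypothetical):

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

static int foo_status_show(struct seq_file *m, void *unused)
{
        struct drm_debugfs_entry *entry = m->private;
        struct drm_device *dev = entry->dev;

        seq_printf(m, "bound to %s\n", dev_name(dev->dev));
        return 0;
}

static const struct drm_debugfs_info foo_debugfs_files[] = {
        { .name = "foo-status", .show = foo_status_show },
};

static void foo_debugfs_register(struct drm_device *dev)
{
        /* Queued on @debugfs_list; the files are created in drm_dev_register(). */
        drm_debugfs_add_files(dev, foo_debugfs_files,
                              ARRAY_SIZE(foo_debugfs_files));
}
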
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 933ce2048e20..7cf4afae2e79 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -87,10 +87,23 @@ struct drm_device {
*/
void *dev_private;
- /** @primary: Primary node */
+ /**
+ * @primary:
+ *
+ * Primary node. Drivers should not interact with this
+ * directly. debugfs interfaces can be registered with
+ * drm_debugfs_add_file(), and sysfs attributes should be added directly
+ * to the hardware's struct device @dev (not to the character device node).
+ */
struct drm_minor *primary;
- /** @render: Render node */
+ /**
+ * @render:
+ *
+ * Render node. Drivers should never interact with this directly.
+ * Drivers should not expose any additional interfaces in debugfs or
+ * sysfs on this node.
+ */
struct drm_minor *render;
/** @accel: Compute Acceleration node */
@@ -298,6 +311,21 @@ struct drm_device {
*/
struct drm_fb_helper *fb_helper;
+ /**
+ * @debugfs_mutex:
+ *
+ * Protects &debugfs_list access.
+ */
+ struct mutex debugfs_mutex;
+
+ /**
+ * @debugfs_list:
+ *
+ * List of debugfs files to be created by the DRM device. The files
+ * must be added during drm_dev_register().
+ */
+ struct list_head debugfs_list;
+
/* Everything below here is for legacy driver, never use! */
/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index d7c521e8860f..1d76d0686b03 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -149,13 +149,6 @@ enum drm_driver_feature {
* Legacy irq support. Only for legacy drivers. Do not use.
*/
DRIVER_HAVE_IRQ = BIT(30),
- /**
- * @DRIVER_KMS_LEGACY_CONTEXT:
- *
- * Used only by nouveau for backwards compatibility with existing
- * userspace. Do not use.
- */
- DRIVER_KMS_LEGACY_CONTEXT = BIT(31),
};
/**
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 372963600f1d..70ae6c290bdc 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -609,6 +609,8 @@ const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector,
void *context);
int drm_edid_connector_update(struct drm_connector *connector,
const struct drm_edid *edid);
+int drm_edid_connector_add_modes(struct drm_connector *connector);
+
const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid,
int ext_id, int *ext_index);
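
A hypothetical &drm_connector_helper_funcs.get_modes implementation combining
the new drm_edid_connector_add_modes() with the existing struct drm_edid API
(sketch only):

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

static int foo_connector_get_modes(struct drm_connector *connector)
{
        const struct drm_edid *drm_edid;
        int count;

        drm_edid = drm_edid_read(connector);

        /* Updates display info and the EDID property even if drm_edid is NULL. */
        drm_edid_connector_update(connector, drm_edid);

        /* Add probed modes from the EDID parsed above. */
        count = drm_edid_connector_add_modes(connector);

        drm_edid_free(drm_edid);

        return count;
}
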
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 095370ef029d..013654de3fc5 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -37,11 +37,6 @@ struct drm_fb_helper;
#include <drm/drm_client.h>
-enum mode_set_atomic {
- LEAVE_ATOMIC_MODE_SET,
- ENTER_ATOMIC_MODE_SET,
-};
-
/**
* struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
* @fb_width: fbdev width
@@ -246,7 +241,9 @@ drm_fb_helper_from_client(struct drm_client_dev *client)
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+ unsigned int preferred_bpp,
const struct drm_fb_helper_funcs *funcs);
+void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper);
int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper);
void drm_fb_helper_fini(struct drm_fb_helper *helper);
int drm_fb_helper_blank(int blank, struct fb_info *info);
@@ -300,7 +297,7 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
-int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
@@ -308,8 +305,13 @@ void drm_fb_helper_lastclose(struct drm_device *dev);
void drm_fb_helper_output_poll_changed(struct drm_device *dev);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
- struct drm_fb_helper *helper,
- const struct drm_fb_helper_funcs *funcs)
+ struct drm_fb_helper *helper,
+ unsigned int preferred_bpp,
+ const struct drm_fb_helper_funcs *funcs)
+{
+}
+
+static inline void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper)
{
}
@@ -467,8 +469,7 @@ static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
return 0;
}
-static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
- int bpp_sel)
+static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper)
{
return 0;
}
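
With the preferred bpp moving into drm_fb_helper_prepare() and the new
drm_fb_helper_unprepare() available for error unwinding, a driver's fbdev
setup could be sketched as follows (the foo_* names and the 32 bpp value are
illustrative):

#include <drm/drm_fb_helper.h>
#include <linux/slab.h>

static const struct drm_fb_helper_funcs foo_fb_helper_funcs = {
        /* the driver's .fb_probe implementation would go here */
};

static void foo_fbdev_setup(struct drm_device *dev)
{
        struct drm_fb_helper *fb_helper;
        int ret;

        fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
        if (!fb_helper)
                return;

        /* The preferred bpp is now stored on the helper at prepare time... */
        drm_fb_helper_prepare(dev, fb_helper, 32, &foo_fb_helper_funcs);

        ret = drm_fb_helper_init(dev, fb_helper);
        if (ret)
                goto err_unprepare;

        /* ...so initial_config() no longer takes a bpp_sel argument. */
        ret = drm_fb_helper_initial_config(fb_helper);
        if (ret)
                goto err_fini;

        return;

err_fini:
        drm_fb_helper_fini(fb_helper);
err_unprepare:
        drm_fb_helper_unprepare(fb_helper);
        kfree(fb_helper);
}
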
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 553210c02ee0..255645c1f9a8 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -25,6 +25,7 @@
#ifndef DRM_FIXED_H
#define DRM_FIXED_H
+#include <linux/kernel.h>
#include <linux/math64.h>
typedef union dfixed {
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index eb5c98cf82b8..291deb09475b 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -30,12 +30,27 @@ void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pi
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool swab);
+void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip);
@@ -50,7 +65,6 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
size_t drm_fb_build_fourcc_list(struct drm_device *dev,
const u32 *native_fourccs, size_t native_nfourccs,
- const u32 *extra_fourccs, size_t extra_nfourccs,
u32 *fourccs_out, size_t nfourccs_out);
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
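
A sketch of the simplified drm_fb_build_fourcc_list() call feeding a primary
plane's format list (the foo_* names are hypothetical; the helper now derives
the emulated formats itself, so no extra_fourccs array is passed):

#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <linux/kernel.h>

static const u32 foo_native_formats[] = {
        DRM_FORMAT_RGB565,
};

static const struct drm_plane_funcs foo_plane_funcs = {
        /* the driver's plane hooks would go here */
};

static int foo_primary_plane_init(struct drm_device *dev,
                                  struct drm_plane *plane)
{
        u32 formats[8];
        size_t nformats;

        nformats = drm_fb_build_fourcc_list(dev,
                                            foo_native_formats,
                                            ARRAY_SIZE(foo_native_formats),
                                            formats, ARRAY_SIZE(formats));

        return drm_universal_plane_init(dev, plane, 0, &foo_plane_funcs,
                                        formats, nformats, NULL,
                                        DRM_PLANE_TYPE_PRIMARY, NULL);
}
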
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index a17c2f903f81..772a4adf5287 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -405,6 +405,7 @@ int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
+void drm_gem_private_object_fini(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
index 6970ccb787e2..40b8b039518e 100644
--- a/include/drm/drm_gem_atomic_helper.h
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -15,8 +15,6 @@ struct drm_simple_display_pipe;
*/
int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state);
-int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
/*
* Helpers for planes with shadow buffers
diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h
index 4c003b4f173e..7b53d673ae7e 100644
--- a/include/drm/drm_gem_ttm_helper.h
+++ b/include/drm/drm_gem_ttm_helper.h
@@ -7,8 +7,7 @@
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
struct iosys_map;
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index c083a1d71cf4..d3e8920c0b64 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -8,8 +8,8 @@
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_modes.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
#include <linux/container_of.h>
#include <linux/iosys-map.h>
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
new file mode 100644
index 000000000000..ed013fdcc1ff
--- /dev/null
+++ b/include/drm/drm_kunit_helpers.h
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef DRM_KUNIT_HELPERS_H_
+#define DRM_KUNIT_HELPERS_H_
+
+#include <kunit/test.h>
+
+struct drm_device;
+struct kunit;
+
+struct device *drm_kunit_helper_alloc_device(struct kunit *test);
+void drm_kunit_helper_free_device(struct kunit *test, struct device *dev);
+
+struct drm_device *
+__drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
+ struct device *dev,
+ size_t size, size_t offset,
+ const struct drm_driver *driver);
+
+/**
+ * drm_kunit_helper_alloc_drm_device_with_driver - Allocates a mock DRM device for KUnit tests
+ * @_test: The test context object
+ * @_dev: The parent device object
+ * @_type: the type of the struct which contains struct &drm_device
+ * @_member: the name of the &drm_device within @_type.
+ * @_drv: Mocked DRM device driver features
+ *
+ * This function creates a struct &drm_device from @_dev and @_drv.
+ *
+ * @_dev should be allocated using drm_kunit_helper_alloc_device().
+ *
+ * The driver is tied to the @_test context and will get cleaned at the
+ * end of the test. The drm_device is allocated through
+ * devm_drm_dev_alloc() and will thus be freed through a device-managed
+ * resource.
+ *
+ * Returns:
+ * A pointer to the new drm_device, or an ERR_PTR() otherwise.
+ */
+#define drm_kunit_helper_alloc_drm_device_with_driver(_test, _dev, _type, _member, _drv) \
+ ((_type *)__drm_kunit_helper_alloc_drm_device_with_driver(_test, _dev, \
+ sizeof(_type), \
+ offsetof(_type, _member), \
+ _drv))
+
+static inline struct drm_device *
+__drm_kunit_helper_alloc_drm_device(struct kunit *test,
+ struct device *dev,
+ size_t size, size_t offset,
+ u32 features)
+{
+ struct drm_driver *driver;
+
+ driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, driver);
+
+ driver->driver_features = features;
+
+ return __drm_kunit_helper_alloc_drm_device_with_driver(test, dev,
+ size, offset,
+ driver);
+}
+
+/**
+ * drm_kunit_helper_alloc_drm_device - Allocates a mock DRM device for KUnit tests
+ * @_test: The test context object
+ * @_dev: The parent device object
+ * @_type: the type of the struct which contains struct &drm_device
+ * @_member: the name of the &drm_device within @_type.
+ * @_features: Mocked DRM device driver features
+ *
+ * This function creates a struct &drm_driver and will create a struct
+ * &drm_device from @_dev and that driver.
+ *
+ * @_dev should be allocated using drm_kunit_helper_alloc_device().
+ *
+ * The driver is tied to the @_test context and will get cleaned at the
+ * end of the test. The drm_device is allocated through
+ * devm_drm_dev_alloc() and will thus be freed through a device-managed
+ * resource.
+ *
+ * Returns:
+ * A pointer to the new drm_device, or an ERR_PTR() otherwise.
+ */
+#define drm_kunit_helper_alloc_drm_device(_test, _dev, _type, _member, _feat) \
+ ((_type *)__drm_kunit_helper_alloc_drm_device(_test, _dev, \
+ sizeof(_type), \
+ offsetof(_type, _member), \
+ _feat))
+
+#endif // DRM_KUNIT_HELPERS_H_
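
A minimal KUnit test using these helpers might look as follows (the foo_*
names are illustrative; the &drm_device is kept as the first member of the
test-private structure, as the existing DRM KUnit tests do):

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_kunit_helpers.h>
#include <kunit/test.h>

struct foo_test_priv {
        struct drm_device drm;
};

static void foo_mock_device_test(struct kunit *test)
{
        struct foo_test_priv *priv;
        struct device *dev;

        dev = drm_kunit_helper_alloc_device(test);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

        priv = drm_kunit_helper_alloc_drm_device(test, dev,
                                                 struct foo_test_priv, drm,
                                                 DRIVER_MODESET);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);

        /* The mock drm_device hangs off the fake parent device. */
        KUNIT_EXPECT_PTR_EQ(test, priv->drm.dev, dev);

        drm_kunit_helper_free_device(test, dev);
}

static struct kunit_case foo_test_cases[] = {
        KUNIT_CASE(foo_mock_device_test),
        {}
};

static struct kunit_suite foo_test_suite = {
        .name = "foo_mock_device",
        .test_cases = foo_test_cases,
};
kunit_test_suite(foo_test_suite);
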
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 14eaecb1825c..816f196b3d4c 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -13,9 +13,10 @@
#include <drm/drm_simple_kms_helper.h>
struct drm_rect;
-struct spi_device;
struct gpio_desc;
+struct iosys_map;
struct regulator;
+struct spi_device;
/**
* struct mipi_dbi - MIPI DBI interface
@@ -122,11 +123,16 @@ struct mipi_dbi_dev {
struct backlight_device *backlight;
/**
- * @regulator: power regulator (optional)
+ * @regulator: power regulator (Vdd) (optional)
*/
struct regulator *regulator;
/**
+ * @io_regulator: I/O power regulator (Vddi) (optional)
+ */
+ struct regulator *io_regulator;
+
+ /**
* @dbi: MIPI DBI interface
*/
struct mipi_dbi dbi;
@@ -163,6 +169,15 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plan_state);
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
+int mipi_dbi_pipe_begin_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void mipi_dbi_pipe_end_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void mipi_dbi_pipe_reset_plane(struct drm_simple_display_pipe *pipe);
+struct drm_plane_state *mipi_dbi_pipe_duplicate_plane_state(struct drm_simple_display_pipe *pipe);
+void mipi_dbi_pipe_destroy_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
void mipi_dbi_hw_reset(struct mipi_dbi *dbi);
bool mipi_dbi_display_is_on(struct mipi_dbi *dbi);
int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev);
@@ -176,8 +191,9 @@ int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val);
int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
size_t len);
-int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap);
+
/**
* mipi_dbi_command - MIPI DCS command with optional parameter(s)
* @dbi: MIPI DBI structure
@@ -207,4 +223,25 @@ void mipi_dbi_debugfs_init(struct drm_minor *minor);
static inline void mipi_dbi_debugfs_init(struct drm_minor *minor) {}
#endif
+/**
+ * DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS - Initializes struct drm_simple_display_pipe_funcs
+ * for MIPI-DBI devices
+ * @enable_: Enable-callback implementation
+ *
+ * This macro initializes struct drm_simple_display_pipe_funcs with default
+ * values for MIPI-DBI-based devices. The only callback that depends on the
+ * hardware is @enable, for which the driver has to provide an implementation.
+ * MIPI-DBI-based drivers are encouraged to use this macro for initialization.
+ */
+#define DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(enable_) \
+ .mode_valid = mipi_dbi_pipe_mode_valid, \
+ .enable = (enable_), \
+ .disable = mipi_dbi_pipe_disable, \
+ .update = mipi_dbi_pipe_update, \
+ .begin_fb_access = mipi_dbi_pipe_begin_fb_access, \
+ .end_fb_access = mipi_dbi_pipe_end_fb_access, \
+ .reset_plane = mipi_dbi_pipe_reset_plane, \
+ .duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state, \
+ .destroy_plane_state = mipi_dbi_pipe_destroy_plane_state
+
#endif /* __LINUX_MIPI_DBI_H */
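
Sketch of a MIPI-DBI driver using the new macro, where only the enable hook
remains hardware-specific (the foo_* names are hypothetical):

#include <drm/drm_mipi_dbi.h>
#include <drm/drm_simple_kms_helper.h>

static void foo_pipe_enable(struct drm_simple_display_pipe *pipe,
                            struct drm_crtc_state *crtc_state,
                            struct drm_plane_state *plane_state)
{
        /* Controller init sequence and first flush would go here. */
}

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
        DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(foo_pipe_enable),
};
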
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 20b21b577dea..c9df0407980c 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -197,10 +197,7 @@ struct mipi_dsi_device {
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
-static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
-{
- return container_of(dev, struct mipi_dsi_device, dev);
-}
+#define to_mipi_dsi_device(__dev) container_of_const(__dev, struct mipi_dsi_device, dev)
/**
* mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any
@@ -296,6 +293,28 @@ int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
u16 brightness);
int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
u16 *brightness);
+int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 brightness);
+int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 *brightness);
+
+/**
+ * mipi_dsi_generic_write_seq - transmit data using a generic write packet
+ * @dsi: DSI peripheral device
+ * @seq: buffer containing the payload
+ */
+#define mipi_dsi_generic_write_seq(dsi, seq...) \
+ do { \
+ static const u8 d[] = { seq }; \
+ struct device *dev = &dsi->dev; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) { \
+ dev_err_ratelimited(dev, "transmit data failed: %d\n", \
+ ret); \
+ return ret; \
+ } \
+ } while (0)
/**
* mipi_dsi_dcs_write_seq - transmit a DCS command with payload
@@ -303,15 +322,18 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
* @cmd: Command
* @seq: buffer containing data to be transmitted
*/
-#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) do { \
- static const u8 d[] = { cmd, seq }; \
- struct device *dev = &dsi->dev; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) { \
- dev_err_ratelimited(dev, "sending command %#02x failed: %d\n", cmd, ret); \
- return ret; \
- } \
+#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \
+ do { \
+ static const u8 d[] = { cmd, seq }; \
+ struct device *dev = &dsi->dev; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) { \
+ dev_err_ratelimited( \
+ dev, "sending command %#02x failed: %d\n", \
+ cmd, ret); \
+ return ret; \
+ } \
} while (0)
/**
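
Typical use in a panel init path; both macros return from the enclosing
function on error, so it must return an int (the command payloads below are
purely illustrative):

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

static int foo_panel_init_sequence(struct mipi_dsi_device *dsi)
{
        /* Vendor-specific generic (non-DCS) packet. */
        mipi_dsi_generic_write_seq(dsi, 0xb0, 0x04);

        /* Standard DCS command with one parameter byte. */
        mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x77);

        return 0;
}
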
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 5362702fffe1..e5b053001d22 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -712,11 +712,21 @@ struct drm_mode_config {
* between different TV connector types.
*/
struct drm_property *tv_select_subconnector_property;
+
/**
- * @tv_mode_property: Optional TV property to select
+ * @legacy_tv_mode_property: Optional TV property to select
* the output TV mode.
+ *
+ * Superseded by @tv_mode_property
+ */
+ struct drm_property *legacy_tv_mode_property;
+
+ /**
+ * @tv_mode_property: Optional TV property to select the TV
+ * standard output on the connector.
*/
struct drm_property *tv_mode_property;
+
/**
* @tv_left_margin_property: Optional TV property to set the left
* margin (expressed in pixels).
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index b0c680e6f670..c613f0abe9dc 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -468,6 +468,23 @@ bool drm_mode_is_420_also(const struct drm_display_info *display,
bool drm_mode_is_420(const struct drm_display_info *display,
const struct drm_display_mode *mode);
+struct drm_display_mode *drm_analog_tv_mode(struct drm_device *dev,
+ enum drm_connector_tv_mode mode,
+ unsigned long pixel_clock_hz,
+ unsigned int hdisplay,
+ unsigned int vdisplay,
+ bool interlace);
+
+static inline struct drm_display_mode *drm_mode_analog_ntsc_480i(struct drm_device *dev)
+{
+ return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_NTSC, 13500000, 720, 480, true);
+}
+
+static inline struct drm_display_mode *drm_mode_analog_pal_576i(struct drm_device *dev)
+{
+ return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_PAL, 13500000, 720, 576, true);
+}
+
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool reduced, bool interlaced,
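
A fixed-mode TV connector could use the new helpers roughly like this
(sketch only; drivers exposing several TV standards can instead rely on
drm_connector_helper_tv_get_modes(), added in drm_probe_helper.h below):

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>

static int foo_tv_connector_get_modes(struct drm_connector *connector)
{
        struct drm_display_mode *mode;

        mode = drm_mode_analog_ntsc_480i(connector->dev);
        if (!mode)
                return 0;

        mode->type |= DRM_MODE_TYPE_PREFERRED;
        drm_mode_probed_add(connector, mode);

        return 1;
}
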
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index d9f2254a039a..206f495bbf06 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -48,10 +48,14 @@
* To make this clear all the helper vtables are pulled together in this location here.
*/
-enum mode_set_atomic;
struct drm_writeback_connector;
struct drm_writeback_job;
+enum mode_set_atomic {
+ LEAVE_ATOMIC_MODE_SET,
+ ENTER_ATOMIC_MODE_SET,
+};
+
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
*
@@ -1143,6 +1147,28 @@ struct drm_connector_helper_funcs {
*/
void (*cleanup_writeback_job)(struct drm_writeback_connector *connector,
struct drm_writeback_job *job);
+
+ /**
+ * @enable_hpd:
+ *
+ * Enable hot-plug detection for the connector.
+ *
+ * This operation is optional.
+ *
+ * This callback is used by the drm_kms_helper_poll_enable() helpers.
+ */
+ void (*enable_hpd)(struct drm_connector *connector);
+
+ /**
+ * @disable_hpd:
+ *
+ * Disable hot-plug detection for the connector.
+ *
+ * This operation is optional.
+ *
+ * This callback is used by the drm_kms_helper_poll_disable() helpers.
+ */
+ void (*disable_hpd)(struct drm_connector *connector);
};
/**
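
Sketch of how a driver would wire the new hooks into its connector helper
functions (the foo_* names are hypothetical):

#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>

static void foo_connector_enable_hpd(struct drm_connector *connector)
{
        /* e.g. unmask the HPD interrupt for this connector */
}

static void foo_connector_disable_hpd(struct drm_connector *connector)
{
        /* e.g. mask the HPD interrupt so periodic polling can take over */
}

static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
        .get_modes = drm_connector_helper_get_modes,
        .enable_hpd = foo_connector_enable_hpd,
        .disable_hpd = foo_connector_disable_hpd,
};
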
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 994bfcdd84c5..432fab2347eb 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -188,6 +188,16 @@ struct drm_panel {
* Panel entry in registry.
*/
struct list_head list;
+
+ /**
+ * @prepare_prev_first:
+ *
+ * The previous controller should be prepared first, before the prepare
+ * for the panel is called. This is largely required for DSI panels
+ * where the DSI host controller should be initialised to LP-11 before
+ * the panel is powered up.
+ */
+ bool prepare_prev_first;
};
void drm_panel_init(struct drm_panel *panel, struct device *dev,
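
A DSI panel driver opting into the new ordering would typically set the flag
at probe time, roughly as follows (the foo_* names are hypothetical):

#include <drm/drm_connector.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>

struct foo_panel {
        struct drm_panel panel;
};

static const struct drm_panel_funcs foo_panel_funcs = {
        /* the driver's prepare/enable/get_modes hooks would go here */
};

static int foo_panel_probe(struct mipi_dsi_device *dsi)
{
        struct foo_panel *ctx;

        ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        drm_panel_init(&ctx->panel, &dsi->dev, &foo_panel_funcs,
                       DRM_MODE_CONNECTOR_DSI);

        /* Have the DSI host reach LP-11 before this panel's prepare() runs. */
        ctx->panel.prepare_prev_first = true;

        drm_panel_add(&ctx->panel);

        return mipi_dsi_attach(dsi);
}
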
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index b7e899ce44f0..90e8abc08653 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -700,115 +700,3 @@
{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
-
-#define r128_PCI_IDS \
- {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
-#define mga_PCI_IDS \
- {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
- {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
- {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
- {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
- {0, 0, 0}
-
-#define sisdrv_PCI_IDS \
- {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
- {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
- {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
- {0, 0, 0}
-
-#define tdfx_PCI_IDS \
- {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
-#define viadrv_PCI_IDS \
- {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
- {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
- {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
- {0, 0, 0}
-
-#define i810_PCI_IDS \
- {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
-#define savage_PCI_IDS \
- {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
- {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
- {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
- {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
- {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
- {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
- {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
- {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
- {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
- {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
- {0, 0, 0}
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 447e664e49d5..51291983ea44 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -77,8 +77,8 @@ struct drm_plane_state {
* write this field directly for a driver's implicit fence.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb()
- * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers.
+ * &drm_plane_helper_funcs.prepare_fb callback. See
+ * drm_gem_plane_helper_prepare_fb() for a suitable helper.
*/
struct dma_fence *fence;
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index a44fb7ef257f..a93a387f8a1a 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -521,7 +521,7 @@ __printf(1, 2)
void __drm_err(const char *format, ...);
#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
-#define __drm_dbg(fmt, ...) ___drm_dbg(NULL, fmt, ##__VA_ARGS__)
+#define __drm_dbg(cat, fmt, ...) ___drm_dbg(NULL, cat, fmt, ##__VA_ARGS__)
#else
#define __drm_dbg(cat, fmt, ...) \
_dynamic_func_call_cls(cat, fmt, ___drm_dbg, \
@@ -605,9 +605,6 @@ void __drm_err(const char *format, ...);
#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
__DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
-/* NOTE: this is deprecated in favor of drm_dbg_kms_ratelimited(NULL, ...). */
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__)
-
/*
* struct drm_device based WARNs
*
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 5880daa14624..4977e0ab72db 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -35,5 +35,6 @@ int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector);
int drm_connector_helper_get_modes_fixed(struct drm_connector *connector,
const struct drm_display_mode *fixed_mode);
int drm_connector_helper_get_modes(struct drm_connector *connector);
+int drm_connector_helper_tv_get_modes(struct drm_connector *connector);
#endif
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 2298fe3af4cd..b2486d073763 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -117,9 +117,9 @@ struct drm_simple_display_pipe_funcs {
* more details.
*
* For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook
- * set drm_gem_simple_display_pipe_prepare_fb() is called automatically
+ * set, drm_gem_plane_helper_prepare_fb() is called automatically
* to implement this. Other drivers which need additional plane
- * processing can call drm_gem_simple_display_pipe_prepare_fb() from
+ * processing can call drm_gem_plane_helper_prepare_fb() from
* their @prepare_fb hook.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index ca857ec9e7eb..9db9e5e504ee 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -228,6 +228,13 @@ struct drm_sched_entity {
*/
struct rb_node rb_tree_node;
+ /**
+ * @elapsed_ns:
+ *
+ * Records the amount of time during which jobs from this entity were
+ * active on the GPU.
+ */
+ uint64_t elapsed_ns;
};
/**
@@ -538,7 +545,6 @@ void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
-void drm_sched_job_kickout(struct drm_sched_job *s_job);
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo.h
index 44a538ee5e2a..8b113c384236 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -32,28 +32,24 @@
#define _TTM_BO_API_H_
#include <drm/drm_gem.h>
-#include <drm/drm_vma_manager.h>
+
#include <linux/kref.h>
#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/dma-resv.h>
-
-#include "ttm_resource.h"
-struct ttm_global;
+#include "ttm_device.h"
-struct ttm_device;
+/* Default number of pre-faulted pages in the TTM fault handler */
+#define TTM_BO_VM_NUM_PREFAULT 16
struct iosys_map;
-struct drm_mm_node;
-
+struct ttm_global;
+struct ttm_device;
struct ttm_placement;
-
struct ttm_place;
+struct ttm_resource;
+struct ttm_resource_manager;
+struct ttm_tt;
/**
* enum ttm_bo_type
@@ -68,15 +64,12 @@ struct ttm_place;
* @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
* driver.
*/
-
enum ttm_bo_type {
ttm_bo_type_device,
ttm_bo_type_kernel,
ttm_bo_type_sg
};
-struct ttm_tt;
-
/**
* struct ttm_buffer_object
*
@@ -85,18 +78,11 @@ struct ttm_tt;
* @type: The bo type.
* @page_alignment: Page alignment.
* @destroy: Destruction function. If NULL, kfree is used.
- * @num_pages: Actual number of pages.
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is destroyed or put on the delayed delete list.
- * @mem: structure describing current placement.
+ * @resource: structure describing current placement.
* @ttm: TTM structure holding system pages.
- * @evicted: Whether the object was evicted without user-space knowing.
* @deleted: True if the object is only a zombie and already deleted.
- * @ddestroy: List head for the delayed destroy list.
- * @swap: List head for swap LRU list.
- * @offset: The current GPU offset, which can have different meanings
- * depending on the memory type. For SYSTEM type memory, it should be 0.
- * @cur_placement: Hint of current placement.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -109,52 +95,43 @@ struct ttm_tt;
* The destroy member, the API visibility of this object makes it possible
* to derive driver specific types.
*/
-
struct ttm_buffer_object {
struct drm_gem_object base;
- /**
+ /*
* Members constant at init.
*/
-
struct ttm_device *bdev;
enum ttm_bo_type type;
uint32_t page_alignment;
void (*destroy) (struct ttm_buffer_object *);
- /**
+ /*
* Members not needing protection.
*/
struct kref kref;
- /**
+ /*
* Members protected by the bo::resv::reserved lock.
*/
-
struct ttm_resource *resource;
struct ttm_tt *ttm;
bool deleted;
struct ttm_lru_bulk_move *bulk_move;
+ unsigned priority;
+ unsigned pin_count;
/**
- * Members protected by the bdev::lru_lock.
- */
-
- struct list_head ddestroy;
-
- /**
- * Members protected by a bo reservation.
+ * @delayed_delete: Work item used when we can't delete the BO
+ * immediately.
*/
-
- unsigned priority;
- unsigned pin_count;
+ struct work_struct delayed_delete;
/**
* Special members that are protected by the reserve lock
* and the bo::lock when written to. Can be read with
* either of these locks held.
*/
-
struct sg_table *sg;
};
@@ -170,7 +147,6 @@ struct ttm_buffer_object {
* mapping can either be an ioremap, a vmap, a kmap or part of a
* premapped region.
*/
-
#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
void *virtual;
@@ -238,95 +214,120 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
}
/**
- * ttm_bo_wait - wait for buffer idle.
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @ticket: ticket used to acquire the ww_mutex.
*
- * @bo: The buffer object.
- * @interruptible: Use interruptible wait.
- * @no_wait: Return immediately if buffer is busy.
+ * Locks a buffer object for validation (or prevents other processes from
+ * locking it for validation), while taking a number of measures to prevent
+ * deadlocks.
*
- * This function must be called with the bo::mutex held, and makes
- * sure any previous rendering to the buffer is completed.
- * Note: It might be necessary to block validations before the
- * wait by reserving the buffer.
- * Returns -EBUSY if no_wait is true and the buffer is busy.
- * Returns -ERESTARTSYS if interrupted by a signal.
+ * Returns:
+ * -EDEADLK: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again.
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @ticket is non-NULL.
*/
-int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
-
-static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible, bool no_wait,
+ struct ww_acquire_ctx *ticket)
{
- return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-}
+ int ret = 0;
-/**
- * ttm_bo_validate
- *
- * @bo: The buffer object.
- * @placement: Proposed placement for the buffer object.
- * @ctx: validation parameters.
- *
- * Changes placement and caching policy of the buffer object
- * according proposed placement.
- * Returns
- * -EINVAL on invalid proposed placement.
- * -ENOMEM on out-of-memory condition.
- * -EBUSY if no_wait is true and buffer busy.
- * -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_validate(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_operation_ctx *ctx);
+ if (no_wait) {
+ bool success;
+
+ if (WARN_ON(ticket))
+ return -EBUSY;
+
+ success = dma_resv_trylock(bo->base.resv);
+ return success ? 0 : -EBUSY;
+ }
+
+ if (interruptible)
+ ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
+ else
+ ret = dma_resv_lock(bo->base.resv, ticket);
+ if (ret == -EINTR)
+ return -ERESTARTSYS;
+ return ret;
+}
/**
- * ttm_bo_put
- *
- * @bo: The buffer object.
- *
- * Unreference a buffer object.
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @ticket: ticket used to acquire the ww_mutex.
+ *
+ * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
*/
-void ttm_bo_put(struct ttm_buffer_object *bo);
+static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+ bool interruptible,
+ struct ww_acquire_ctx *ticket)
+{
+ if (interruptible) {
+ int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
+ ticket);
+ if (ret == -EINTR)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
+ dma_resv_lock_slow(bo->base.resv, ticket);
+ return 0;
+}
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
-void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
- struct ttm_lru_bulk_move *bulk);
-/**
- * ttm_bo_lock_delayed_workqueue
- *
- * Prevent the delayed workqueue from running.
- * Returns
- * True if the workqueue was queued at the time
- */
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
+static inline void
+ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
+{
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_bo_move_to_lru_tail(bo);
+ spin_unlock(&bo->bdev->lru_lock);
+}
+
+static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ WARN_ON(bo->resource);
+ bo->resource = new_mem;
+}
/**
- * ttm_bo_unlock_delayed_workqueue
+ * ttm_bo_move_null - assign memory for a buffer object.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
*
- * Allows the delayed workqueue to run.
+ * Assign the memory from new_mem to the memory of the buffer object bo.
*/
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
+static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, new_mem);
+}
/**
- * ttm_bo_eviction_valuable
+ * ttm_bo_unreserve
*
- * @bo: The buffer object to evict
- * @place: the placement we need to make room for
+ * @bo: A pointer to a struct ttm_buffer_object.
*
- * Check if it is valuable to evict the BO to make room for the given placement.
+ * Unreserve a previous reservation of @bo.
*/
-bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
- const struct ttm_place *place);
-
-int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
- enum ttm_bo_type type, struct ttm_placement *placement,
- uint32_t alignment, struct ttm_operation_ctx *ctx,
- struct sg_table *sg, struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *));
-int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
- enum ttm_bo_type type, struct ttm_placement *placement,
- uint32_t alignment, bool interruptible,
- struct sg_table *sg, struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *));
+static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+ dma_resv_unlock(bo->base.resv);
+}
/**
* ttm_kmap_obj_virtual
@@ -346,126 +347,83 @@ static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
return map->virtual;
}
-/**
- * ttm_bo_kmap
- *
- * @bo: The buffer object.
- * @start_page: The first page to map.
- * @num_pages: Number of pages to map.
- * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
- *
- * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
- * data in the buffer object. The ttm_kmap_obj_virtual function can then be
- * used to obtain a virtual address to the data.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid range.
- */
+int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx);
+void ttm_bo_put(struct ttm_buffer_object *bo);
+void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk);
+int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
+void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy)(struct ttm_buffer_object *));
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, bool interruptible,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
unsigned long num_pages, struct ttm_bo_kmap_obj *map);
-
-/**
- * ttm_bo_kunmap
- *
- * @map: Object describing the map to unmap.
- *
- * Unmaps a kernel map set up by ttm_bo_kmap.
- */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
-
-/**
- * ttm_bo_vmap
- *
- * @bo: The buffer object.
- * @map: pointer to a struct iosys_map representing the map.
- *
- * Sets up a kernel virtual mapping, using ioremap or vmap to the
- * data in the buffer object. The parameter @map returns the virtual
- * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid range.
- */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
-
-/**
- * ttm_bo_vunmap
- *
- * @bo: The buffer object.
- * @map: Object describing the map to unmap.
- *
- * Unmaps a kernel map set up by ttm_bo_vmap().
- */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
-
-/**
- * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
- *
- * @vma: vma as input from the fbdev mmap method.
- * @bo: The bo backing the address space.
- *
- * Maps a buffer object.
- */
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_io
- *
- * @bdev: Pointer to the struct ttm_device.
- * @filp: Pointer to the struct file attempting to read / write.
- * @wbuf: User-space pointer to address of buffer to write. NULL on read.
- * @rbuf: User-space pointer to address of buffer to read into.
- * Null on write.
- * @count: Number of bytes to read / write.
- * @f_pos: Pointer to current file position.
- * @write: 1 for read, 0 for write.
- *
- * This function implements read / write into ttm buffer objects, and is
- * intended to
- * be called from the fops::read and fops::write method.
- * Returns:
- * See man (2) write, man(2) read. In particular,
- * the function may return -ERESTARTSYS if
- * interrupted by a signal.
- */
-ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
- const char __user *wbuf, char __user *rbuf,
- size_t count, loff_t *f_pos, bool write);
-
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags);
-
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
-
int ttm_mem_evict_first(struct ttm_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket);
-
-/* Default number of pre-faulted pages in the TTM fault handler */
-#define TTM_BO_VM_NUM_PREFAULT 16
-
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf);
-
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
pgoff_t num_prefault);
-
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
-
void ttm_bo_vm_open(struct vm_area_struct *vma);
-
void ttm_bo_vm_close(struct vm_area_struct *vma);
-
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
-bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
-
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_resource **mem,
+ struct ttm_operation_ctx *ctx);
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+/*
+ * ttm_bo_util.c
+ */
+int ttm_mem_io_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+void ttm_mem_io_free(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+void ttm_move_memcpy(bool clear, u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter);
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem);
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ struct dma_fence *fence, bool evict,
+ bool pipeline,
+ struct ttm_resource *new_mem);
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem);
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp);
+void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
+
#endif
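
For reference, the usual reserve/validate/unreserve pattern built from the
helpers that now live in this header (sketch only; the foo_* name is
hypothetical):

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

static int foo_bo_pin_in_place(struct ttm_buffer_object *bo,
                               struct ttm_placement *placement)
{
        struct ttm_operation_ctx ctx = { .interruptible = true };
        int ret;

        /* Interruptible, blocking reserve without a ww_acquire ticket. */
        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (ret)
                return ret;

        ret = ttm_bo_validate(bo, placement, &ctx);
        if (!ret)
                ttm_bo_pin(bo);

        /* Moves the BO to the LRU tail and drops the dma-resv lock. */
        ttm_bo_unreserve(bo);

        return ret;
}
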
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
deleted file mode 100644
index 1afa891f488a..000000000000
--- a/include/drm/ttm/ttm_bo_driver.h
+++ /dev/null
@@ -1,303 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-#ifndef _TTM_BO_DRIVER_H_
-#define _TTM_BO_DRIVER_H_
-
-#include <drm/drm_mm.h>
-#include <drm/drm_vma_manager.h>
-#include <linux/workqueue.h>
-#include <linux/fs.h>
-#include <linux/spinlock.h>
-#include <linux/dma-resv.h>
-
-#include <drm/ttm/ttm_device.h>
-
-#include "ttm_bo_api.h"
-#include "ttm_kmap_iter.h"
-#include "ttm_placement.h"
-#include "ttm_tt.h"
-#include "ttm_pool.h"
-
-/*
- * ttm_bo.c
- */
-
-/**
- * ttm_bo_mem_space
- *
- * @bo: Pointer to a struct ttm_buffer_object. the data of which
- * we want to allocate space for.
- * @proposed_placement: Proposed new placement for the buffer object.
- * @mem: A struct ttm_resource.
- * @interruptible: Sleep interruptible when sliping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- *
- * Allocate memory space for the buffer object pointed to by @bo, using
- * the placement flags in @mem, potentially evicting other idle buffer objects.
- * This function may sleep while waiting for space to become available.
- * Returns:
- * -EBUSY: No space available (only if no_wait == 1).
- * -ENOMEM: Could not allocate memory for the buffer object, either due to
- * fragmentation or concurrent allocators.
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_bo_unmap_virtual
- *
- * @bo: tear down the virtual mappings for this BO
- */
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_reserve:
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @ticket: ticket used to acquire the ww_mutex.
- *
- * Locks a buffer object for validation. (Or prevents other processes from
- * locking it for validation), while taking a number of measures to prevent
- * deadlocks.
- *
- * Returns:
- * -EDEADLK: The reservation may cause a deadlock.
- * Release all buffer reservations, wait for @bo to become unreserved and
- * try again.
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- * -EBUSY: The function needed to sleep, but @no_wait was true
- * -EALREADY: Bo already reserved using @ticket. This error code will only
- * be returned if @use_ticket is set to true.
- */
-static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait,
- struct ww_acquire_ctx *ticket)
-{
- int ret;
-
- if (no_wait) {
- bool success;
- if (WARN_ON(ticket))
- return -EBUSY;
-
- success = dma_resv_trylock(bo->base.resv);
- return success ? 0 : -EBUSY;
- }
-
- if (interruptible)
- ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
- else
- ret = dma_resv_lock(bo->base.resv, ticket);
- if (ret == -EINTR)
- return -ERESTARTSYS;
- return ret;
-}
-
-/**
- * ttm_bo_reserve_slowpath:
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @sequence: Set (@bo)->sequence to this value after lock
- *
- * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
- * from all our other reservations. Because there are no other reservations
- * held by us, this function cannot deadlock any more.
- */
-static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
- bool interruptible,
- struct ww_acquire_ctx *ticket)
-{
- if (interruptible) {
- int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
- ticket);
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
- return ret;
- }
- dma_resv_lock_slow(bo->base.resv, ticket);
- return 0;
-}
-
-static inline void
-ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
-{
- spin_lock(&bo->bdev->lru_lock);
- ttm_bo_move_to_lru_tail(bo);
- spin_unlock(&bo->bdev->lru_lock);
-}
-
-static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
- struct ttm_resource *new_mem)
-{
- WARN_ON(bo->resource);
- bo->resource = new_mem;
-}
-
-/**
- * ttm_bo_move_null = assign memory for a buffer object.
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
- *
- * Assign the memory from new_mem to the memory of the buffer object bo.
- */
-static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
- struct ttm_resource *new_mem)
-{
- ttm_resource_free(bo, &bo->resource);
- ttm_bo_assign_mem(bo, new_mem);
-}
-
-/**
- * ttm_bo_unreserve
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Unreserve a previous reservation of @bo.
- */
-static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
-{
- ttm_bo_move_to_lru_tail_unlocked(bo);
- dma_resv_unlock(bo->base.resv);
-}
-
-/*
- * ttm_bo_util.c
- */
-int ttm_mem_io_reserve(struct ttm_device *bdev,
- struct ttm_resource *mem);
-void ttm_mem_io_free(struct ttm_device *bdev,
- struct ttm_resource *mem);
-
-/**
- * ttm_bo_move_memcpy
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Fallback move function for a mappable buffer object in mappable memory.
- * The function will, if successful,
- * free any old aperture space, and set (@new_mem)->mm_node to NULL,
- * and update the (@bo)->mem placement flags. If unsuccessful, the old
- * data remains untouched, and it's up to the caller to free the
- * memory space indicated by @new_mem.
- * Returns:
- * 0 on success, a negative error code on failure.
- */
-
-int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem);
-
-/**
- * ttm_bo_move_accel_cleanup.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @pipeline: evictions are to be pipelined.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Accelerated move function to be called when an accelerated move
- * has been scheduled. The function will create a new temporary buffer object
- * representing the old placement, and put the sync object on both buffer
- * objects. After that the newly created buffer object is unref'd to be
- * destroyed when the move is complete. This will help pipeline
- * buffer moves.
- */
-int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct dma_fence *fence, bool evict,
- bool pipeline,
- struct ttm_resource *new_mem);
-
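The memcpy fallback and the accelerated cleanup helper above are typically combined in a driver's move path roughly as sketched below (a sketch only: my_drv_copy() is a hypothetical blit submission returning a dma_fence, and the real driver hook signature may differ):

static int my_drv_move(struct ttm_buffer_object *bo, bool evict,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem)
{
	struct dma_fence *fence;
	int ret;

	fence = my_drv_copy(bo, bo->resource, new_mem);	/* hypothetical blit */
	if (IS_ERR(fence))
		/* No copy engine available: fall back to the CPU copy. */
		return ttm_bo_move_memcpy(bo, ctx, new_mem);

	/* Let TTM pipeline the move behind the blit fence. */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);	/* the helper takes its own references */
	return ret;
}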
-/**
- * ttm_bo_move_sync_cleanup.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
- * by the caller to be idle. Typically used after memcpy buffer moves.
- */
-void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
- struct ttm_resource *new_mem);
-
-/**
- * ttm_bo_pipeline_gutting.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Pipelined gutting of a BO, freeing its backing store.
- */
-int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
-
-/**
- * ttm_io_prot
- *
- * @bo: ttm buffer object
- * @res: ttm resource object
- * @tmp: Page protection flag for a normal, cached mapping.
- *
- * Utility function that returns the pgprot_t that should be used for
- * setting up a PTE with the caching mode indicated by @res.
- */
-pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
- pgprot_t tmp);
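For context, a tiny sketch of how a driver-side kernel mapping would typically pick its protection with this helper (assuming the BO is already reserved; my_drv_map_prot() is an invented name):

static pgprot_t my_drv_map_prot(struct ttm_buffer_object *bo)
{
	/* PAGE_KERNEL is the cached baseline; ttm_io_prot() adjusts it
	 * (e.g. to write-combined) to match the resource's caching mode. */
	return ttm_io_prot(bo, bo->resource, PAGE_KERNEL);
}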
-
-/**
- * ttm_bo_tt_bind
- *
- * Bind the object tt to a memory resource.
- */
-int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
-
-/**
- * ttm_bo_tt_destroy.
- */
-void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
-
-void ttm_move_memcpy(bool clear,
- u32 num_pages,
- struct ttm_kmap_iter *dst_iter,
- struct ttm_kmap_iter *src_iter);
-
-struct ttm_kmap_iter *
-ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
- struct io_mapping *iomap,
- struct sg_table *st,
- resource_size_t start);
-#endif
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 95b3c04b1ab9..4f3e81eac6f3 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -252,11 +252,6 @@ struct ttm_device {
spinlock_t lru_lock;
/**
- * @ddestroy: Destroyed but not yet cleaned up buffer objects.
- */
- struct list_head ddestroy;
-
- /**
* @pinned: Buffer objects which are pinned and so not on any LRU list.
*/
struct list_head pinned;
@@ -270,7 +265,7 @@ struct ttm_device {
/**
* @wq: Work queue structure for the delayed delete workqueue.
*/
- struct delayed_work wq;
+ struct workqueue_struct *wq;
};
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index a99d7fdf2964..03aca29d3ce4 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -33,7 +33,9 @@
#include <linux/list.h>
-#include "ttm_bo_api.h"
+struct ww_acquire_ctx;
+struct dma_fence;
+struct ttm_buffer_object;
/**
* struct ttm_validate_buffer
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index f7aef3f310d7..aa95439708dc 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -11,40 +11,70 @@
* The MSM chipset and hardware revision used by Qualcomm bootloaders, DTS for
* older chipsets (qcom,msm-id) and in socinfo driver:
*/
+#define QCOM_ID_MSM8260 70
+#define QCOM_ID_MSM8660 71
+#define QCOM_ID_APQ8060 86
#define QCOM_ID_MSM8960 87
#define QCOM_ID_APQ8064 109
+#define QCOM_ID_MSM8930 116
+#define QCOM_ID_MSM8630 117
+#define QCOM_ID_MSM8230 118
+#define QCOM_ID_APQ8030 119
+#define QCOM_ID_MSM8627 120
+#define QCOM_ID_MSM8227 121
#define QCOM_ID_MSM8660A 122
#define QCOM_ID_MSM8260A 123
#define QCOM_ID_APQ8060A 124
#define QCOM_ID_MSM8974 126
+#define QCOM_ID_MSM8225 127
+#define QCOM_ID_MSM8625 129
#define QCOM_ID_MPQ8064 130
#define QCOM_ID_MSM8960AB 138
#define QCOM_ID_APQ8060AB 139
#define QCOM_ID_MSM8260AB 140
#define QCOM_ID_MSM8660AB 141
+#define QCOM_ID_MSM8930AA 142
+#define QCOM_ID_MSM8630AA 143
+#define QCOM_ID_MSM8230AA 144
#define QCOM_ID_MSM8626 145
#define QCOM_ID_MSM8610 147
#define QCOM_ID_APQ8064AB 153
+#define QCOM_ID_MSM8930AB 154
+#define QCOM_ID_MSM8630AB 155
+#define QCOM_ID_MSM8230AB 156
+#define QCOM_ID_APQ8030AB 157
#define QCOM_ID_MSM8226 158
#define QCOM_ID_MSM8526 159
+#define QCOM_ID_APQ8030AA 160
#define QCOM_ID_MSM8110 161
#define QCOM_ID_MSM8210 162
#define QCOM_ID_MSM8810 163
#define QCOM_ID_MSM8212 164
#define QCOM_ID_MSM8612 165
#define QCOM_ID_MSM8112 166
+#define QCOM_ID_MSM8125 167
#define QCOM_ID_MSM8225Q 168
#define QCOM_ID_MSM8625Q 169
#define QCOM_ID_MSM8125Q 170
#define QCOM_ID_APQ8064AA 172
#define QCOM_ID_APQ8084 178
+#define QCOM_ID_MSM8130 179
+#define QCOM_ID_MSM8130AA 180
+#define QCOM_ID_MSM8130AB 181
+#define QCOM_ID_MSM8627AA 182
+#define QCOM_ID_MSM8227AA 183
#define QCOM_ID_APQ8074 184
#define QCOM_ID_MSM8274 185
#define QCOM_ID_MSM8674 186
+#define QCOM_ID_MDM9635 187
#define QCOM_ID_MSM8974PRO_AC 194
#define QCOM_ID_MSM8126 198
#define QCOM_ID_APQ8026 199
#define QCOM_ID_MSM8926 200
+#define QCOM_ID_IPQ8062 201
+#define QCOM_ID_IPQ8064 202
+#define QCOM_ID_IPQ8066 203
+#define QCOM_ID_IPQ8068 204
#define QCOM_ID_MSM8326 205
#define QCOM_ID_MSM8916 206
#define QCOM_ID_MSM8994 207
@@ -68,34 +98,74 @@
#define QCOM_ID_MSM8510 225
#define QCOM_ID_MSM8512 226
#define QCOM_ID_MSM8936 233
+#define QCOM_ID_MDM9640 234
#define QCOM_ID_MSM8939 239
#define QCOM_ID_APQ8036 240
#define QCOM_ID_APQ8039 241
+#define QCOM_ID_MSM8236 242
+#define QCOM_ID_MSM8636 243
+#define QCOM_ID_MSM8909 245
#define QCOM_ID_MSM8996 246
#define QCOM_ID_APQ8016 247
#define QCOM_ID_MSM8216 248
#define QCOM_ID_MSM8116 249
#define QCOM_ID_MSM8616 250
#define QCOM_ID_MSM8992 251
+#define QCOM_ID_APQ8092 252
#define QCOM_ID_APQ8094 253
+#define QCOM_ID_MSM8209 258
+#define QCOM_ID_MSM8208 259
+#define QCOM_ID_MDM9209 260
+#define QCOM_ID_MDM9309 261
+#define QCOM_ID_MDM9609 262
+#define QCOM_ID_MSM8239 263
+#define QCOM_ID_MSM8952 264
+#define QCOM_ID_APQ8009 265
#define QCOM_ID_MSM8956 266
+#define QCOM_ID_MSM8929 268
+#define QCOM_ID_MSM8629 269
+#define QCOM_ID_MSM8229 270
+#define QCOM_ID_APQ8029 271
+#define QCOM_ID_APQ8056 274
+#define QCOM_ID_MSM8609 275
+#define QCOM_ID_APQ8076 277
#define QCOM_ID_MSM8976 278
+#define QCOM_ID_MDM9650 279
+#define QCOM_ID_IPQ8065 280
+#define QCOM_ID_IPQ8069 281
+#define QCOM_ID_MDM9655 283
+#define QCOM_ID_MDM9250 284
+#define QCOM_ID_MDM9255 285
+#define QCOM_ID_MDM9350 286
+#define QCOM_ID_APQ8052 289
#define QCOM_ID_MDM9607 290
#define QCOM_ID_APQ8096 291
#define QCOM_ID_MSM8998 292
#define QCOM_ID_MSM8953 293
+#define QCOM_ID_MSM8937 294
+#define QCOM_ID_APQ8037 295
#define QCOM_ID_MDM8207 296
#define QCOM_ID_MDM9207 297
#define QCOM_ID_MDM9307 298
#define QCOM_ID_MDM9628 299
+#define QCOM_ID_MSM8909W 300
+#define QCOM_ID_APQ8009W 301
+#define QCOM_ID_MSM8996L 302
+#define QCOM_ID_MSM8917 303
#define QCOM_ID_APQ8053 304
#define QCOM_ID_MSM8996SG 305
+#define QCOM_ID_APQ8017 307
+#define QCOM_ID_MSM8217 308
+#define QCOM_ID_MSM8617 309
#define QCOM_ID_MSM8996AU 310
#define QCOM_ID_APQ8096AU 311
#define QCOM_ID_APQ8096SG 312
+#define QCOM_ID_MSM8940 313
+#define QCOM_ID_SDX201 314
#define QCOM_ID_SDM660 317
#define QCOM_ID_SDM630 318
#define QCOM_ID_APQ8098 319
+#define QCOM_ID_MSM8920 320
#define QCOM_ID_SDM845 321
#define QCOM_ID_MDM9206 322
#define QCOM_ID_IPQ8074 323
@@ -103,6 +173,8 @@
#define QCOM_ID_SDM658 325
#define QCOM_ID_SDA658 326
#define QCOM_ID_SDA630 327
+#define QCOM_ID_MSM8905 331
+#define QCOM_ID_SDX202 333
#define QCOM_ID_SDM450 338
#define QCOM_ID_SM8150 339
#define QCOM_ID_SDA845 341
@@ -114,10 +186,15 @@
#define QCOM_ID_SDM632 349
#define QCOM_ID_SDA632 350
#define QCOM_ID_SDA450 351
+#define QCOM_ID_SDM439 353
+#define QCOM_ID_SDM429 354
#define QCOM_ID_SM8250 356
#define QCOM_ID_SA8155 362
+#define QCOM_ID_SDA439 363
+#define QCOM_ID_SDA429 364
#define QCOM_ID_IPQ8070 375
#define QCOM_ID_IPQ8071 376
+#define QCOM_ID_QM215 386
#define QCOM_ID_IPQ8072A 389
#define QCOM_ID_IPQ8074A 390
#define QCOM_ID_IPQ8076A 391
@@ -127,11 +204,14 @@
#define QCOM_ID_IPQ8071A 396
#define QCOM_ID_IPQ6018 402
#define QCOM_ID_IPQ6028 403
+#define QCOM_ID_SDM429W 416
#define QCOM_ID_SM4250 417
#define QCOM_ID_IPQ6000 421
#define QCOM_ID_IPQ6010 422
#define QCOM_ID_SC7180 425
#define QCOM_ID_SM6350 434
+#define QCOM_ID_QCM2150 436
+#define QCOM_ID_SDA429W 437
#define QCOM_ID_SM8350 439
#define QCOM_ID_SM6115 444
#define QCOM_ID_SC8280XP 449
@@ -155,6 +235,8 @@
#define QCOM_ID_QRU1032 588
#define QCOM_ID_QRU1052 589
#define QCOM_ID_QRU1062 590
+#define QCOM_ID_IPQ5332 592
+#define QCOM_ID_IPQ5322 593
/*
* The board type and revision information, used by Qualcomm bootloaders and
@@ -165,6 +247,7 @@
#define QCOM_BOARD_ID_MTP 8
#define QCOM_BOARD_ID_DRAGONBOARD 10
+#define QCOM_BOARD_ID_QRD 11
#define QCOM_BOARD_ID_SBC 24
#endif /* _DT_BINDINGS_ARM_QCOM_IDS_H */
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index e20c43cc36f6..e5b2a1ba02bc 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -273,6 +273,8 @@
#define IMX6QDL_CLK_MMDC_P0_IPG 263
#define IMX6QDL_CLK_DCIC1 264
#define IMX6QDL_CLK_DCIC2 265
-#define IMX6QDL_CLK_END 266
+#define IMX6QDL_CLK_ENET_REF_SEL 266
+#define IMX6QDL_CLK_ENET_REF_PAD 267
+#define IMX6QDL_CLK_END 268
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h
index f446710fe63d..494fd0c37fb5 100644
--- a/include/dt-bindings/clock/imx6sll-clock.h
+++ b/include/dt-bindings/clock/imx6sll-clock.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2018 NXP.
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index 79094338e6f1..66239ebc0e23 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -256,7 +256,12 @@
#define IMX6UL_CLK_GPIO4 247
#define IMX6UL_CLK_GPIO5 248
#define IMX6UL_CLK_MMDC_P1_IPG 249
+#define IMX6UL_CLK_ENET1_REF_125M 250
+#define IMX6UL_CLK_ENET1_REF_SEL 251
+#define IMX6UL_CLK_ENET1_REF_PAD 252
+#define IMX6UL_CLK_ENET2_REF_SEL 253
+#define IMX6UL_CLK_ENET2_REF_PAD 254
-#define IMX6UL_CLK_END 250
+#define IMX6UL_CLK_END 255
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/imx8ulp-clock.h b/include/dt-bindings/clock/imx8ulp-clock.h
index 953ecfe8ebcc..827404fadf5c 100644
--- a/include/dt-bindings/clock/imx8ulp-clock.h
+++ b/include/dt-bindings/clock/imx8ulp-clock.h
@@ -201,7 +201,7 @@
#define IMX8ULP_CLK_SAI7 2
#define IMX8ULP_CLK_SPDIF 3
#define IMX8ULP_CLK_ISI 4
-#define IMX8ULP_CLK_CSI_REGS 5
+#define IMX8ULP_CLK_CSI_REGS 5
#define IMX8ULP_CLK_PCTLD 6
#define IMX8ULP_CLK_CSI 7
#define IMX8ULP_CLK_DSI 8
@@ -212,7 +212,7 @@
#define IMX8ULP_CLK_GPU2D 13
#define IMX8ULP_CLK_GPU3D 14
#define IMX8ULP_CLK_DC_NANO 15
-#define IMX8ULP_CLK_CSI_CLK_UI 16
+#define IMX8ULP_CLK_CSI_CLK_UI 16
#define IMX8ULP_CLK_CSI_CLK_ESC 17
#define IMX8ULP_CLK_RGPIOD 18
#define IMX8ULP_CLK_DMA2_MP 19
diff --git a/include/dt-bindings/clock/loongson,ls2k-clk.h b/include/dt-bindings/clock/loongson,ls2k-clk.h
new file mode 100644
index 000000000000..db1e27e792ff
--- /dev/null
+++ b/include/dt-bindings/clock/loongson,ls2k-clk.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Author: Yinbo Zhu <zhuyinbo@loongson.cn>
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_LOONGSON2_H
+#define __DT_BINDINGS_CLOCK_LOONGSON2_H
+
+#define LOONGSON2_REF_100M 0
+#define LOONGSON2_NODE_PLL 1
+#define LOONGSON2_DDR_PLL 2
+#define LOONGSON2_DC_PLL 3
+#define LOONGSON2_PIX0_PLL 4
+#define LOONGSON2_PIX1_PLL 5
+#define LOONGSON2_NODE_CLK 6
+#define LOONGSON2_HDA_CLK 7
+#define LOONGSON2_GPU_CLK 8
+#define LOONGSON2_DDR_CLK 9
+#define LOONGSON2_GMAC_CLK 10
+#define LOONGSON2_DC_CLK 11
+#define LOONGSON2_APB_CLK 12
+#define LOONGSON2_USB_CLK 13
+#define LOONGSON2_SATA_CLK 14
+#define LOONGSON2_PIX0_CLK 15
+#define LOONGSON2_PIX1_CLK 16
+#define LOONGSON2_CLK_END 17
+
+#endif
diff --git a/include/dt-bindings/clock/mediatek,mt7981-clk.h b/include/dt-bindings/clock/mediatek,mt7981-clk.h
new file mode 100644
index 000000000000..192f8cefb589
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt7981-clk.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Wenzhen.Yu <wenzhen.yu@mediatek.com>
+ * Author: Jianhui Zhao <zhaojh329@gmail.com>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7981_H
+#define _DT_BINDINGS_CLK_MT7981_H
+
+/* TOPCKGEN */
+#define CLK_TOP_CB_CKSQ_40M 0
+#define CLK_TOP_CB_M_416M 1
+#define CLK_TOP_CB_M_D2 2
+#define CLK_TOP_CB_M_D3 3
+#define CLK_TOP_M_D3_D2 4
+#define CLK_TOP_CB_M_D4 5
+#define CLK_TOP_CB_M_D8 6
+#define CLK_TOP_M_D8_D2 7
+#define CLK_TOP_CB_MM_720M 8
+#define CLK_TOP_CB_MM_D2 9
+#define CLK_TOP_CB_MM_D3 10
+#define CLK_TOP_CB_MM_D3_D5 11
+#define CLK_TOP_CB_MM_D4 12
+#define CLK_TOP_CB_MM_D6 13
+#define CLK_TOP_MM_D6_D2 14
+#define CLK_TOP_CB_MM_D8 15
+#define CLK_TOP_CB_APLL2_196M 16
+#define CLK_TOP_APLL2_D2 17
+#define CLK_TOP_APLL2_D4 18
+#define CLK_TOP_NET1_2500M 19
+#define CLK_TOP_CB_NET1_D4 20
+#define CLK_TOP_CB_NET1_D5 21
+#define CLK_TOP_NET1_D5_D2 22
+#define CLK_TOP_NET1_D5_D4 23
+#define CLK_TOP_CB_NET1_D8 24
+#define CLK_TOP_NET1_D8_D2 25
+#define CLK_TOP_NET1_D8_D4 26
+#define CLK_TOP_CB_NET2_800M 27
+#define CLK_TOP_CB_NET2_D2 28
+#define CLK_TOP_CB_NET2_D4 29
+#define CLK_TOP_NET2_D4_D2 30
+#define CLK_TOP_NET2_D4_D4 31
+#define CLK_TOP_CB_NET2_D6 32
+#define CLK_TOP_CB_WEDMCU_208M 33
+#define CLK_TOP_CB_SGM_325M 34
+#define CLK_TOP_CKSQ_40M_D2 35
+#define CLK_TOP_CB_RTC_32K 36
+#define CLK_TOP_CB_RTC_32P7K 37
+#define CLK_TOP_USB_TX250M 38
+#define CLK_TOP_FAUD 39
+#define CLK_TOP_NFI1X 40
+#define CLK_TOP_USB_EQ_RX250M 41
+#define CLK_TOP_USB_CDR_CK 42
+#define CLK_TOP_USB_LN0_CK 43
+#define CLK_TOP_SPINFI_BCK 44
+#define CLK_TOP_SPI 45
+#define CLK_TOP_SPIM_MST 46
+#define CLK_TOP_UART_BCK 47
+#define CLK_TOP_PWM_BCK 48
+#define CLK_TOP_I2C_BCK 49
+#define CLK_TOP_PEXTP_TL 50
+#define CLK_TOP_EMMC_208M 51
+#define CLK_TOP_EMMC_400M 52
+#define CLK_TOP_DRAMC_REF 53
+#define CLK_TOP_DRAMC_MD32 54
+#define CLK_TOP_SYSAXI 55
+#define CLK_TOP_SYSAPB 56
+#define CLK_TOP_ARM_DB_MAIN 57
+#define CLK_TOP_AP2CNN_HOST 58
+#define CLK_TOP_NETSYS 59
+#define CLK_TOP_NETSYS_500M 60
+#define CLK_TOP_NETSYS_WED_MCU 61
+#define CLK_TOP_NETSYS_2X 62
+#define CLK_TOP_SGM_325M 63
+#define CLK_TOP_SGM_REG 64
+#define CLK_TOP_F26M 65
+#define CLK_TOP_EIP97B 66
+#define CLK_TOP_USB3_PHY 67
+#define CLK_TOP_AUD 68
+#define CLK_TOP_A1SYS 69
+#define CLK_TOP_AUD_L 70
+#define CLK_TOP_A_TUNER 71
+#define CLK_TOP_U2U3_REF 72
+#define CLK_TOP_U2U3_SYS 73
+#define CLK_TOP_U2U3_XHCI 74
+#define CLK_TOP_USB_FRMCNT 75
+#define CLK_TOP_NFI1X_SEL 76
+#define CLK_TOP_SPINFI_SEL 77
+#define CLK_TOP_SPI_SEL 78
+#define CLK_TOP_SPIM_MST_SEL 79
+#define CLK_TOP_UART_SEL 80
+#define CLK_TOP_PWM_SEL 81
+#define CLK_TOP_I2C_SEL 82
+#define CLK_TOP_PEXTP_TL_SEL 83
+#define CLK_TOP_EMMC_208M_SEL 84
+#define CLK_TOP_EMMC_400M_SEL 85
+#define CLK_TOP_F26M_SEL 86
+#define CLK_TOP_DRAMC_SEL 87
+#define CLK_TOP_DRAMC_MD32_SEL 88
+#define CLK_TOP_SYSAXI_SEL 89
+#define CLK_TOP_SYSAPB_SEL 90
+#define CLK_TOP_ARM_DB_MAIN_SEL 91
+#define CLK_TOP_AP2CNN_HOST_SEL 92
+#define CLK_TOP_NETSYS_SEL 93
+#define CLK_TOP_NETSYS_500M_SEL 94
+#define CLK_TOP_NETSYS_MCU_SEL 95
+#define CLK_TOP_NETSYS_2X_SEL 96
+#define CLK_TOP_SGM_325M_SEL 97
+#define CLK_TOP_SGM_REG_SEL 98
+#define CLK_TOP_EIP97B_SEL 99
+#define CLK_TOP_USB3_PHY_SEL 100
+#define CLK_TOP_AUD_SEL 101
+#define CLK_TOP_A1SYS_SEL 102
+#define CLK_TOP_AUD_L_SEL 103
+#define CLK_TOP_A_TUNER_SEL 104
+#define CLK_TOP_U2U3_SEL 105
+#define CLK_TOP_U2U3_SYS_SEL 106
+#define CLK_TOP_U2U3_XHCI_SEL 107
+#define CLK_TOP_USB_FRMCNT_SEL 108
+#define CLK_TOP_AUD_I2S_M 109
+
+/* INFRACFG */
+#define CLK_INFRA_66M_MCK 0
+#define CLK_INFRA_UART0_SEL 1
+#define CLK_INFRA_UART1_SEL 2
+#define CLK_INFRA_UART2_SEL 3
+#define CLK_INFRA_SPI0_SEL 4
+#define CLK_INFRA_SPI1_SEL 5
+#define CLK_INFRA_SPI2_SEL 6
+#define CLK_INFRA_PWM1_SEL 7
+#define CLK_INFRA_PWM2_SEL 8
+#define CLK_INFRA_PWM3_SEL 9
+#define CLK_INFRA_PWM_BSEL 10
+#define CLK_INFRA_PCIE_SEL 11
+#define CLK_INFRA_GPT_STA 12
+#define CLK_INFRA_PWM_HCK 13
+#define CLK_INFRA_PWM_STA 14
+#define CLK_INFRA_PWM1_CK 15
+#define CLK_INFRA_PWM2_CK 16
+#define CLK_INFRA_PWM3_CK 17
+#define CLK_INFRA_CQ_DMA_CK 18
+#define CLK_INFRA_AUD_BUS_CK 19
+#define CLK_INFRA_AUD_26M_CK 20
+#define CLK_INFRA_AUD_L_CK 21
+#define CLK_INFRA_AUD_AUD_CK 22
+#define CLK_INFRA_AUD_EG2_CK 23
+#define CLK_INFRA_DRAMC_26M_CK 24
+#define CLK_INFRA_DBG_CK 25
+#define CLK_INFRA_AP_DMA_CK 26
+#define CLK_INFRA_SEJ_CK 27
+#define CLK_INFRA_SEJ_13M_CK 28
+#define CLK_INFRA_THERM_CK 29
+#define CLK_INFRA_I2C0_CK 30
+#define CLK_INFRA_UART0_CK 31
+#define CLK_INFRA_UART1_CK 32
+#define CLK_INFRA_UART2_CK 33
+#define CLK_INFRA_SPI2_CK 34
+#define CLK_INFRA_SPI2_HCK_CK 35
+#define CLK_INFRA_NFI1_CK 36
+#define CLK_INFRA_SPINFI1_CK 37
+#define CLK_INFRA_NFI_HCK_CK 38
+#define CLK_INFRA_SPI0_CK 39
+#define CLK_INFRA_SPI1_CK 40
+#define CLK_INFRA_SPI0_HCK_CK 41
+#define CLK_INFRA_SPI1_HCK_CK 42
+#define CLK_INFRA_FRTC_CK 43
+#define CLK_INFRA_MSDC_CK 44
+#define CLK_INFRA_MSDC_HCK_CK 45
+#define CLK_INFRA_MSDC_133M_CK 46
+#define CLK_INFRA_MSDC_66M_CK 47
+#define CLK_INFRA_ADC_26M_CK 48
+#define CLK_INFRA_ADC_FRC_CK 49
+#define CLK_INFRA_FBIST2FPC_CK 50
+#define CLK_INFRA_I2C_MCK_CK 51
+#define CLK_INFRA_I2C_PCK_CK 52
+#define CLK_INFRA_IUSB_133_CK 53
+#define CLK_INFRA_IUSB_66M_CK 54
+#define CLK_INFRA_IUSB_SYS_CK 55
+#define CLK_INFRA_IUSB_CK 56
+#define CLK_INFRA_IPCIE_CK 57
+#define CLK_INFRA_IPCIE_PIPE_CK 58
+#define CLK_INFRA_IPCIER_CK 59
+#define CLK_INFRA_IPCIEB_CK 60
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_NET2PLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_SGMPLL 3
+#define CLK_APMIXED_WEDMCUPLL 4
+#define CLK_APMIXED_NET1PLL 5
+#define CLK_APMIXED_MPLL 6
+#define CLK_APMIXED_APLL2 7
+
+/* SGMIISYS_0 */
+#define CLK_SGM0_TX_EN 0
+#define CLK_SGM0_RX_EN 1
+#define CLK_SGM0_CK0_EN 2
+#define CLK_SGM0_CDR_CK0_EN 3
+
+/* SGMIISYS_1 */
+#define CLK_SGM1_TX_EN 0
+#define CLK_SGM1_RX_EN 1
+#define CLK_SGM1_CK1_EN 2
+#define CLK_SGM1_CDR_CK1_EN 3
+
+/* ETHSYS */
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_WOCPU0_EN 3
+
+#endif /* _DT_BINDINGS_CLK_MT7981_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h
index 7f657cf8cc8a..a985248d6332 100644
--- a/include/dt-bindings/clock/qcom,gcc-apq8084.h
+++ b/include/dt-bindings/clock/qcom,gcc-apq8084.h
@@ -339,6 +339,7 @@
#define GCC_PCIE_1_MSTR_AXI_CLK 330
#define GCC_PCIE_1_PIPE_CLK 331
#define GCC_PCIE_1_SLV_AXI_CLK 332
+#define GCC_MMSS_GPLL0_CLK_SRC 333
/* gdscs */
#define USB_HS_HSIC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h
index bc3051543347..126a51898571 100644
--- a/include/dt-bindings/clock/qcom,gcc-qcs404.h
+++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h
@@ -177,4 +177,8 @@
#define GCC_PCIE_0_PIPE_ARES 21
#define GCC_WDSP_RESTART 22
+/* Indexes for GDSCs */
+#define MDSS_GDSC 0
+#define OXILI_GDSC 1
+
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8350.h b/include/dt-bindings/clock/qcom,gcc-sm8350.h
index f6be3da5f781..529c1b8b0417 100644
--- a/include/dt-bindings/clock/qcom,gcc-sm8350.h
+++ b/include/dt-bindings/clock/qcom,gcc-sm8350.h
@@ -8,7 +8,6 @@
#define _DT_BINDINGS_CLK_QCOM_GCC_SM8350_H
/* GCC HW clocks */
-#define CORE_BI_PLL_TEST_SE 0
#define PCIE_0_PIPE_CLK 1
#define PCIE_1_PIPE_CLK 2
#define UFS_CARD_RX_SYMBOL_0_CLK 3
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8450.h b/include/dt-bindings/clock/qcom,gcc-sm8450.h
index cf1469312c4c..9679410843a0 100644
--- a/include/dt-bindings/clock/qcom,gcc-sm8450.h
+++ b/include/dt-bindings/clock/qcom,gcc-sm8450.h
@@ -8,7 +8,6 @@
#define _DT_BINDINGS_CLK_QCOM_GCC_SM8450_H
/* GCC HW clocks */
-#define CORE_BI_PLL_TEST_SE 0
#define PCIE_0_PIPE_CLK 1
#define PCIE_1_PHY_AUX_CLK 2
#define PCIE_1_PIPE_CLK 3
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index c0ad624e930e..46309c9953b2 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -168,5 +168,7 @@
#define RPM_SMD_MSS_CFG_AHB_CLK 122
#define RPM_SMD_MSS_CFG_AHB_A_CLK 123
#define RPM_SMD_BIMC_FREQ_LOG 124
+#define RPM_SMD_LN_BB_CLK_PIN 125
+#define RPM_SMD_LN_BB_A_CLK_PIN 126
#endif
diff --git a/include/dt-bindings/clock/qcom,sa8775p-gcc.h b/include/dt-bindings/clock/qcom,sa8775p-gcc.h
new file mode 100644
index 000000000000..01f54234963d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-gcc.h
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SA8775P_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SA8775P_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL4 3
+#define GCC_GPLL5 4
+#define GCC_GPLL7 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_QUPV3_AXI_CLK 7
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 8
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 9
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 10
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 11
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 12
+#define GCC_AHB2PHY0_CLK 13
+#define GCC_AHB2PHY2_CLK 14
+#define GCC_AHB2PHY3_CLK 15
+#define GCC_BOOT_ROM_AHB_CLK 16
+#define GCC_CAMERA_AHB_CLK 17
+#define GCC_CAMERA_HF_AXI_CLK 18
+#define GCC_CAMERA_SF_AXI_CLK 19
+#define GCC_CAMERA_THROTTLE_XO_CLK 20
+#define GCC_CAMERA_XO_CLK 21
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 22
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 23
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 24
+#define GCC_DDRSS_GPU_AXI_CLK 25
+#define GCC_DISP1_AHB_CLK 26
+#define GCC_DISP1_HF_AXI_CLK 27
+#define GCC_DISP1_XO_CLK 28
+#define GCC_DISP_AHB_CLK 29
+#define GCC_DISP_HF_AXI_CLK 30
+#define GCC_DISP_XO_CLK 31
+#define GCC_EDP_REF_CLKREF_EN 32
+#define GCC_EMAC0_AXI_CLK 33
+#define GCC_EMAC0_PHY_AUX_CLK 34
+#define GCC_EMAC0_PHY_AUX_CLK_SRC 35
+#define GCC_EMAC0_PTP_CLK 36
+#define GCC_EMAC0_PTP_CLK_SRC 37
+#define GCC_EMAC0_RGMII_CLK 38
+#define GCC_EMAC0_RGMII_CLK_SRC 39
+#define GCC_EMAC0_SLV_AHB_CLK 40
+#define GCC_EMAC1_AXI_CLK 41
+#define GCC_EMAC1_PHY_AUX_CLK 42
+#define GCC_EMAC1_PHY_AUX_CLK_SRC 43
+#define GCC_EMAC1_PTP_CLK 44
+#define GCC_EMAC1_PTP_CLK_SRC 45
+#define GCC_EMAC1_RGMII_CLK 46
+#define GCC_EMAC1_RGMII_CLK_SRC 47
+#define GCC_EMAC1_SLV_AHB_CLK 48
+#define GCC_GP1_CLK 49
+#define GCC_GP1_CLK_SRC 50
+#define GCC_GP2_CLK 51
+#define GCC_GP2_CLK_SRC 52
+#define GCC_GP3_CLK 53
+#define GCC_GP3_CLK_SRC 54
+#define GCC_GP4_CLK 55
+#define GCC_GP4_CLK_SRC 56
+#define GCC_GP5_CLK 57
+#define GCC_GP5_CLK_SRC 58
+#define GCC_GPU_CFG_AHB_CLK 59
+#define GCC_GPU_GPLL0_CLK_SRC 60
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 61
+#define GCC_GPU_MEMNOC_GFX_CLK 62
+#define GCC_GPU_SNOC_DVM_GFX_CLK 63
+#define GCC_GPU_TCU_THROTTLE_AHB_CLK 64
+#define GCC_GPU_TCU_THROTTLE_CLK 65
+#define GCC_PCIE_0_AUX_CLK 66
+#define GCC_PCIE_0_AUX_CLK_SRC 67
+#define GCC_PCIE_0_CFG_AHB_CLK 68
+#define GCC_PCIE_0_MSTR_AXI_CLK 69
+#define GCC_PCIE_0_PHY_AUX_CLK 70
+#define GCC_PCIE_0_PHY_AUX_CLK_SRC 71
+#define GCC_PCIE_0_PHY_RCHNG_CLK 72
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 73
+#define GCC_PCIE_0_PIPE_CLK 74
+#define GCC_PCIE_0_PIPE_CLK_SRC 75
+#define GCC_PCIE_0_PIPE_DIV_CLK_SRC 76
+#define GCC_PCIE_0_PIPEDIV2_CLK 77
+#define GCC_PCIE_0_SLV_AXI_CLK 78
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 79
+#define GCC_PCIE_1_AUX_CLK 80
+#define GCC_PCIE_1_AUX_CLK_SRC 81
+#define GCC_PCIE_1_CFG_AHB_CLK 82
+#define GCC_PCIE_1_MSTR_AXI_CLK 83
+#define GCC_PCIE_1_PHY_AUX_CLK 84
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 85
+#define GCC_PCIE_1_PHY_RCHNG_CLK 86
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 87
+#define GCC_PCIE_1_PIPE_CLK 88
+#define GCC_PCIE_1_PIPE_CLK_SRC 89
+#define GCC_PCIE_1_PIPE_DIV_CLK_SRC 90
+#define GCC_PCIE_1_PIPEDIV2_CLK 91
+#define GCC_PCIE_1_SLV_AXI_CLK 92
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 93
+#define GCC_PCIE_CLKREF_EN 94
+#define GCC_PCIE_THROTTLE_CFG_CLK 95
+#define GCC_PDM2_CLK 96
+#define GCC_PDM2_CLK_SRC 97
+#define GCC_PDM_AHB_CLK 98
+#define GCC_PDM_XO4_CLK 99
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 100
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 101
+#define GCC_QMIP_DISP1_AHB_CLK 102
+#define GCC_QMIP_DISP1_ROT_AHB_CLK 103
+#define GCC_QMIP_DISP_AHB_CLK 104
+#define GCC_QMIP_DISP_ROT_AHB_CLK 105
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 106
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 107
+#define GCC_QMIP_VIDEO_VCPU_AHB_CLK 108
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 109
+#define GCC_QUPV3_WRAP0_CORE_CLK 110
+#define GCC_QUPV3_WRAP0_S0_CLK 111
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 112
+#define GCC_QUPV3_WRAP0_S1_CLK 113
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 114
+#define GCC_QUPV3_WRAP0_S2_CLK 115
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 116
+#define GCC_QUPV3_WRAP0_S3_CLK 117
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 118
+#define GCC_QUPV3_WRAP0_S4_CLK 119
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 120
+#define GCC_QUPV3_WRAP0_S5_CLK 121
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 122
+#define GCC_QUPV3_WRAP0_S6_CLK 123
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 124
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 125
+#define GCC_QUPV3_WRAP1_CORE_CLK 126
+#define GCC_QUPV3_WRAP1_S0_CLK 127
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 128
+#define GCC_QUPV3_WRAP1_S1_CLK 129
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 130
+#define GCC_QUPV3_WRAP1_S2_CLK 131
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 132
+#define GCC_QUPV3_WRAP1_S3_CLK 133
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 134
+#define GCC_QUPV3_WRAP1_S4_CLK 135
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 136
+#define GCC_QUPV3_WRAP1_S5_CLK 137
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 138
+#define GCC_QUPV3_WRAP1_S6_CLK 139
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 140
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 141
+#define GCC_QUPV3_WRAP2_CORE_CLK 142
+#define GCC_QUPV3_WRAP2_S0_CLK 143
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 144
+#define GCC_QUPV3_WRAP2_S1_CLK 145
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 146
+#define GCC_QUPV3_WRAP2_S2_CLK 147
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 148
+#define GCC_QUPV3_WRAP2_S3_CLK 149
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 150
+#define GCC_QUPV3_WRAP2_S4_CLK 151
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 152
+#define GCC_QUPV3_WRAP2_S5_CLK 153
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 154
+#define GCC_QUPV3_WRAP2_S6_CLK 155
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 156
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 157
+#define GCC_QUPV3_WRAP3_CORE_CLK 158
+#define GCC_QUPV3_WRAP3_QSPI_CLK 159
+#define GCC_QUPV3_WRAP3_S0_CLK 160
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 161
+#define GCC_QUPV3_WRAP3_S0_DIV_CLK_SRC 162
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 163
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 164
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 165
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 166
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 167
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 168
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 169
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 170
+#define GCC_SDCC1_AHB_CLK 171
+#define GCC_SDCC1_APPS_CLK 172
+#define GCC_SDCC1_APPS_CLK_SRC 173
+#define GCC_SDCC1_ICE_CORE_CLK 174
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 175
+#define GCC_SGMI_CLKREF_EN 176
+#define GCC_TSCSS_AHB_CLK 177
+#define GCC_TSCSS_CNTR_CLK_SRC 178
+#define GCC_TSCSS_ETU_CLK 179
+#define GCC_TSCSS_GLOBAL_CNTR_CLK 180
+#define GCC_UFS_CARD_AHB_CLK 181
+#define GCC_UFS_CARD_AXI_CLK 182
+#define GCC_UFS_CARD_AXI_CLK_SRC 183
+#define GCC_UFS_CARD_ICE_CORE_CLK 184
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 185
+#define GCC_UFS_CARD_PHY_AUX_CLK 186
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 187
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 188
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 189
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 190
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 191
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 192
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 193
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 194
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 195
+#define GCC_UFS_PHY_AHB_CLK 196
+#define GCC_UFS_PHY_AXI_CLK 197
+#define GCC_UFS_PHY_AXI_CLK_SRC 198
+#define GCC_UFS_PHY_ICE_CORE_CLK 199
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 200
+#define GCC_UFS_PHY_PHY_AUX_CLK 201
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 202
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 203
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 204
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 205
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 206
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 207
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 208
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 209
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 210
+#define GCC_USB20_MASTER_CLK 211
+#define GCC_USB20_MASTER_CLK_SRC 212
+#define GCC_USB20_MOCK_UTMI_CLK 213
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 214
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 215
+#define GCC_USB20_SLEEP_CLK 216
+#define GCC_USB30_PRIM_MASTER_CLK 217
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 218
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 219
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 220
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 221
+#define GCC_USB30_PRIM_SLEEP_CLK 222
+#define GCC_USB30_SEC_MASTER_CLK 223
+#define GCC_USB30_SEC_MASTER_CLK_SRC 224
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 225
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 226
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 227
+#define GCC_USB30_SEC_SLEEP_CLK 228
+#define GCC_USB3_PRIM_PHY_AUX_CLK 229
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 230
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 231
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 232
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 233
+#define GCC_USB3_SEC_PHY_AUX_CLK 234
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 235
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 236
+#define GCC_USB3_SEC_PHY_PIPE_CLK 237
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 238
+#define GCC_USB_CLKREF_EN 239
+#define GCC_VIDEO_AHB_CLK 240
+#define GCC_VIDEO_AXI0_CLK 241
+#define GCC_VIDEO_AXI1_CLK 242
+#define GCC_VIDEO_XO_CLK 243
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 244
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 245
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 246
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 247
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 248
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY1_BCR 1
+#define GCC_DISPLAY_BCR 2
+#define GCC_EMAC0_BCR 3
+#define GCC_EMAC1_BCR 4
+#define GCC_GPU_BCR 5
+#define GCC_MMSS_BCR 6
+#define GCC_PCIE_0_BCR 7
+#define GCC_PCIE_0_LINK_DOWN_BCR 8
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 9
+#define GCC_PCIE_0_PHY_BCR 10
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_BCR 12
+#define GCC_PCIE_1_LINK_DOWN_BCR 13
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 14
+#define GCC_PCIE_1_PHY_BCR 15
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 16
+#define GCC_PDM_BCR 17
+#define GCC_QUPV3_WRAPPER_0_BCR 18
+#define GCC_QUPV3_WRAPPER_1_BCR 19
+#define GCC_QUPV3_WRAPPER_2_BCR 20
+#define GCC_QUPV3_WRAPPER_3_BCR 21
+#define GCC_SDCC1_BCR 22
+#define GCC_TSCSS_BCR 23
+#define GCC_UFS_CARD_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB20_PRIM_BCR 26
+#define GCC_USB2_PHY_PRIM_BCR 27
+#define GCC_USB2_PHY_SEC_BCR 28
+#define GCC_USB30_PRIM_BCR 29
+#define GCC_USB30_SEC_BCR 30
+#define GCC_USB3_DP_PHY_PRIM_BCR 31
+#define GCC_USB3_DP_PHY_SEC_BCR 32
+#define GCC_USB3_PHY_PRIM_BCR 33
+#define GCC_USB3_PHY_SEC_BCR 34
+#define GCC_USB3_PHY_TERT_BCR 35
+#define GCC_USB3_UNIPHY_MP0_BCR 36
+#define GCC_USB3_UNIPHY_MP1_BCR 37
+#define GCC_USB3PHY_PHY_PRIM_BCR 38
+#define GCC_USB3PHY_PHY_SEC_BCR 39
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 40
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 41
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 42
+#define GCC_VIDEO_BCR 43
+#define GCC_VIDEO_AXI0_CLK_ARES 44
+#define GCC_VIDEO_AXI1_CLK_ARES 45
+
+/* GCC GDSCs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB20_PRIM_GDSC 4
+#define USB30_PRIM_GDSC 5
+#define USB30_SEC_GDSC 6
+#define EMAC0_GDSC 7
+#define EMAC1_GDSC 8
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GCC_SA8775P_H */
diff --git a/include/dt-bindings/clock/stih416-clks.h b/include/dt-bindings/clock/stih416-clks.h
deleted file mode 100644
index 74302278024e..000000000000
--- a/include/dt-bindings/clock/stih416-clks.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides clock index constants for the STMicroelectronics
- * STiH416 SoC.
- */
-#ifndef _CLK_STIH416
-#define _CLK_STIH416
-
-/* CLOCKGEN A0 */
-#define CLK_ICN_REG 0
-#define CLK_ETH1_PHY 4
-
-/* CLOCKGEN A1 */
-#define CLK_ICN_IF_2 0
-#define CLK_GMAC0_PHY 3
-
-#endif
diff --git a/include/dt-bindings/clock/sun20i-d1-ccu.h b/include/dt-bindings/clock/sun20i-d1-ccu.h
index e3ac53315e1a..e143b9929763 100644
--- a/include/dt-bindings/clock/sun20i-d1-ccu.h
+++ b/include/dt-bindings/clock/sun20i-d1-ccu.h
@@ -152,5 +152,7 @@
#define CLK_FANOUT0 142
#define CLK_FANOUT1 143
#define CLK_FANOUT2 144
+#define CLK_BUS_CAN0 145
+#define CLK_BUS_CAN1 146
#endif /* _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_ */
diff --git a/include/dt-bindings/firmware/qcom,scm.h b/include/dt-bindings/firmware/qcom,scm.h
new file mode 100644
index 000000000000..1a4e68fa0744
--- /dev/null
+++ b/include/dt-bindings/firmware/qcom,scm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
+#define _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
+
+#define QCOM_SCM_VMID_HLOS 0x3
+#define QCOM_SCM_VMID_MSS_MSA 0xF
+#define QCOM_SCM_VMID_WLAN 0x18
+#define QCOM_SCM_VMID_WLAN_CE 0x19
+#define QCOM_SCM_VMID_NAV 0x2B
+
+#endif
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index 5566e58196a2..b5d531237448 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
* This header provides constants for most GPIO bindings.
*
diff --git a/include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h b/include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h
new file mode 100644
index 000000000000..f21c39d0928e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SA8775P_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SA8775P_H
+
+/* aggre1_noc */
+#define MASTER_QUP_3 0
+#define MASTER_EMAC 1
+#define MASTER_EMAC_1 2
+#define MASTER_SDC 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB2 5
+#define MASTER_USB3_0 6
+#define MASTER_USB3_1 7
+#define SLAVE_A1NOC_SNOC 8
+
+/* aggre2_noc */
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_QUP_2 3
+#define MASTER_CNOC_A2NOC 4
+#define MASTER_CRYPTO_CORE0 5
+#define MASTER_CRYPTO_CORE1 6
+#define MASTER_IPA 7
+#define MASTER_QDSS_ETR_0 8
+#define MASTER_QDSS_ETR_1 9
+#define MASTER_UFS_CARD 10
+#define SLAVE_A2NOC_SNOC 11
+
+/* clk_virt */
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define MASTER_QUP_CORE_3 3
+#define SLAVE_QUP_CORE_0 4
+#define SLAVE_QUP_CORE_1 5
+#define SLAVE_QUP_CORE_2 6
+#define SLAVE_QUP_CORE_3 7
+
+/* config_noc */
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_0 2
+#define SLAVE_AHB2PHY_1 3
+#define SLAVE_AHB2PHY_2 4
+#define SLAVE_AHB2PHY_3 5
+#define SLAVE_ANOC_THROTTLE_CFG 6
+#define SLAVE_AOSS 7
+#define SLAVE_APPSS 8
+#define SLAVE_BOOT_ROM 9
+#define SLAVE_CAMERA_CFG 10
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 11
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 12
+#define SLAVE_CLK_CTL 13
+#define SLAVE_CDSP_CFG 14
+#define SLAVE_CDSP1_CFG 15
+#define SLAVE_RBCPR_CX_CFG 16
+#define SLAVE_RBCPR_MMCX_CFG 17
+#define SLAVE_RBCPR_MX_CFG 18
+#define SLAVE_CPR_NSPCX 19
+#define SLAVE_CRYPTO_0_CFG 20
+#define SLAVE_CX_RDPM 21
+#define SLAVE_DISPLAY_CFG 22
+#define SLAVE_DISPLAY_RT_THROTTLE_CFG 23
+#define SLAVE_DISPLAY1_CFG 24
+#define SLAVE_DISPLAY1_RT_THROTTLE_CFG 25
+#define SLAVE_EMAC_CFG 26
+#define SLAVE_EMAC1_CFG 27
+#define SLAVE_GP_DSP0_CFG 28
+#define SLAVE_GP_DSP1_CFG 29
+#define SLAVE_GPDSP0_THROTTLE_CFG 30
+#define SLAVE_GPDSP1_THROTTLE_CFG 31
+#define SLAVE_GPU_TCU_THROTTLE_CFG 32
+#define SLAVE_GFX3D_CFG 33
+#define SLAVE_HWKM 34
+#define SLAVE_IMEM_CFG 35
+#define SLAVE_IPA_CFG 36
+#define SLAVE_IPC_ROUTER_CFG 37
+#define SLAVE_LPASS 38
+#define SLAVE_LPASS_THROTTLE_CFG 39
+#define SLAVE_MX_RDPM 40
+#define SLAVE_MXC_RDPM 41
+#define SLAVE_PCIE_0_CFG 42
+#define SLAVE_PCIE_1_CFG 43
+#define SLAVE_PCIE_RSC_CFG 44
+#define SLAVE_PCIE_TCU_THROTTLE_CFG 45
+#define SLAVE_PCIE_THROTTLE_CFG 46
+#define SLAVE_PDM 47
+#define SLAVE_PIMEM_CFG 48
+#define SLAVE_PKA_WRAPPER_CFG 49
+#define SLAVE_QDSS_CFG 50
+#define SLAVE_QM_CFG 51
+#define SLAVE_QM_MPU_CFG 52
+#define SLAVE_QUP_0 53
+#define SLAVE_QUP_1 54
+#define SLAVE_QUP_2 55
+#define SLAVE_QUP_3 56
+#define SLAVE_SAIL_THROTTLE_CFG 57
+#define SLAVE_SDC1 58
+#define SLAVE_SECURITY 59
+#define SLAVE_SNOC_THROTTLE_CFG 60
+#define SLAVE_TCSR 61
+#define SLAVE_TLMM 62
+#define SLAVE_TSC_CFG 63
+#define SLAVE_UFS_CARD_CFG 64
+#define SLAVE_UFS_MEM_CFG 65
+#define SLAVE_USB2 66
+#define SLAVE_USB3_0 67
+#define SLAVE_USB3_1 68
+#define SLAVE_VENUS_CFG 69
+#define SLAVE_VENUS_CVP_THROTTLE_CFG 70
+#define SLAVE_VENUS_V_CPU_THROTTLE_CFG 71
+#define SLAVE_VENUS_VCODEC_THROTTLE_CFG 72
+#define SLAVE_DDRSS_CFG 73
+#define SLAVE_GPDSP_NOC_CFG 74
+#define SLAVE_CNOC_MNOC_HF_CFG 75
+#define SLAVE_CNOC_MNOC_SF_CFG 76
+#define SLAVE_PCIE_ANOC_CFG 77
+#define SLAVE_SNOC_CFG 78
+#define SLAVE_BOOT_IMEM 79
+#define SLAVE_IMEM 80
+#define SLAVE_PIMEM 81
+#define SLAVE_PCIE_0 82
+#define SLAVE_PCIE_1 83
+#define SLAVE_QDSS_STM 84
+#define SLAVE_TCU 85
+
+/* dc_noc */
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+/* gem_noc */
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_COMPUTE_NOC_1 5
+#define MASTER_GEM_NOC_CFG 6
+#define MASTER_GPDSP_SAIL 7
+#define MASTER_GFX3D 8
+#define MASTER_MNOC_HF_MEM_NOC 9
+#define MASTER_MNOC_SF_MEM_NOC 10
+#define MASTER_ANOC_PCIE_GEM_NOC 11
+#define MASTER_SNOC_GC_MEM_NOC 12
+#define MASTER_SNOC_SF_MEM_NOC 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_GEM_NOC_PCIE_CNOC 16
+#define SLAVE_SERVICE_GEM_NOC_1 17
+#define SLAVE_SERVICE_GEM_NOC_2 18
+#define SLAVE_SERVICE_GEM_NOC 19
+#define SLAVE_SERVICE_GEM_NOC2 20
+
+/* gpdsp_anoc */
+#define MASTER_DSP0 0
+#define MASTER_DSP1 1
+#define SLAVE_GP_DSP_SAIL_NOC 2
+
+/* lpass_ag_noc */
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+/* mc_virt */
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+/* mmss_noc */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP0 3
+#define MASTER_MDP1 4
+#define MASTER_MDP_CORE1_0 5
+#define MASTER_MDP_CORE1_1 6
+#define MASTER_CNOC_MNOC_HF_CFG 7
+#define MASTER_CNOC_MNOC_SF_CFG 8
+#define MASTER_VIDEO_P0 9
+#define MASTER_VIDEO_P1 10
+#define MASTER_VIDEO_PROC 11
+#define MASTER_VIDEO_V_PROC 12
+#define SLAVE_MNOC_HF_MEM_NOC 13
+#define SLAVE_MNOC_SF_MEM_NOC 14
+#define SLAVE_SERVICE_MNOC_HF 15
+#define SLAVE_SERVICE_MNOC_SF 16
+
+/* nspa_noc */
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_HCP_A 2
+#define SLAVE_CDSP_MEM_NOC 3
+#define SLAVE_SERVICE_NSP_NOC 4
+
+/* nspb_noc */
+#define MASTER_CDSPB_NOC_CFG 0
+#define MASTER_CDSP_PROC_B 1
+#define SLAVE_CDSPB_MEM_NOC 2
+#define SLAVE_HCP_B 3
+#define SLAVE_SERVICE_NSPB_NOC 4
+
+/* pcie_anoc */
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+
+/* system_noc */
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_SA8775P_H */
diff --git a/include/dt-bindings/interconnect/qcom,sc7180.h b/include/dt-bindings/interconnect/qcom,sc7180.h
index f9970f6032eb..de5d5867bd67 100644
--- a/include/dt-bindings/interconnect/qcom,sc7180.h
+++ b/include/dt-bindings/interconnect/qcom,sc7180.h
@@ -108,9 +108,6 @@
#define SLAVE_LLCC 11
#define SLAVE_SERVICE_GEM_NOC 12
-#define MASTER_IPA_CORE 0
-#define SLAVE_IPA_CORE 1
-
#define MASTER_LLCC 0
#define SLAVE_EBI1 1
diff --git a/include/dt-bindings/interconnect/qcom,sc8180x.h b/include/dt-bindings/interconnect/qcom,sc8180x.h
index e84cfec5afdd..0bdc8d6cb401 100644
--- a/include/dt-bindings/interconnect/qcom,sc8180x.h
+++ b/include/dt-bindings/interconnect/qcom,sc8180x.h
@@ -129,9 +129,6 @@
#define SLAVE_SERVICE_GEM_NOC 16
#define SLAVE_SERVICE_GEM_NOC_1 17
-#define MASTER_IPA_CORE 0
-#define SLAVE_IPA_CORE 1
-
#define MASTER_LLCC 0
#define SLAVE_EBI_CH0 1
diff --git a/include/dt-bindings/interconnect/qcom,sc8280xp.h b/include/dt-bindings/interconnect/qcom,sc8280xp.h
index a3e5fda7c127..f89f47e99c6d 100644
--- a/include/dt-bindings/interconnect/qcom,sc8280xp.h
+++ b/include/dt-bindings/interconnect/qcom,sc8280xp.h
@@ -48,11 +48,11 @@
#define SLAVE_SERVICE_A2NOC 19
/* clk_virt */
-#define MASTER_IPA_CORE 0
+/* 0 was used by MASTER_IPA_CORE, now represented as RPMh clock */
#define MASTER_QUP_CORE_0 1
#define MASTER_QUP_CORE_1 2
#define MASTER_QUP_CORE_2 3
-#define SLAVE_IPA_CORE 4
+/* 4 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
#define SLAVE_QUP_CORE_0 5
#define SLAVE_QUP_CORE_1 6
#define SLAVE_QUP_CORE_2 7
diff --git a/include/dt-bindings/interconnect/qcom,sdm670-rpmh.h b/include/dt-bindings/interconnect/qcom,sdm670-rpmh.h
new file mode 100644
index 000000000000..9b516cc360bb
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdm670-rpmh.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Qualcomm SDM670 interconnect IDs
+ *
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM670_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM670_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_BLSP_1 1
+#define MASTER_TSIF 2
+#define MASTER_EMMC 3
+#define MASTER_SDCC_2 4
+#define MASTER_SDCC_4 5
+#define MASTER_UFS_MEM 6
+#define SLAVE_A1NOC_SNOC 7
+#define SLAVE_SERVICE_A1NOC 8
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_BLSP_2 2
+#define MASTER_CNOC_A2NOC 3
+#define MASTER_CRYPTO_CORE_0 4
+#define MASTER_IPA 5
+#define MASTER_QDSS_ETR 6
+#define MASTER_USB3 7
+#define SLAVE_A2NOC_SNOC 8
+#define SLAVE_SERVICE_A2NOC 9
+
+#define MASTER_SPDM 0
+#define MASTER_SNOC_CNOC 1
+#define SLAVE_A1NOC_CFG 2
+#define SLAVE_A2NOC_CFG 3
+#define SLAVE_AOP 4
+#define SLAVE_AOSS 5
+#define SLAVE_CAMERA_CFG 6
+#define SLAVE_CLK_CTL 7
+#define SLAVE_CDSP_CFG 8
+#define SLAVE_RBCPR_CX_CFG 9
+#define SLAVE_CRYPTO_0_CFG 10
+#define SLAVE_DCC_CFG 11
+#define SLAVE_CNOC_DDRSS 12
+#define SLAVE_DISPLAY_CFG 13
+#define SLAVE_EMMC_CFG 14
+#define SLAVE_GLM 15
+#define SLAVE_GRAPHICS_3D_CFG 16
+#define SLAVE_IMEM_CFG 17
+#define SLAVE_IPA_CFG 18
+#define SLAVE_CNOC_MNOC_CFG 19
+#define SLAVE_PDM 20
+#define SLAVE_SOUTH_PHY_CFG 21
+#define SLAVE_PIMEM_CFG 22
+#define SLAVE_PRNG 23
+#define SLAVE_QDSS_CFG 24
+#define SLAVE_BLSP_2 25
+#define SLAVE_BLSP_1 26
+#define SLAVE_SDCC_2 27
+#define SLAVE_SDCC_4 28
+#define SLAVE_SNOC_CFG 29
+#define SLAVE_SPDM_WRAPPER 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM_NORTH 32
+#define SLAVE_TLMM_SOUTH 33
+#define SLAVE_TSIF 34
+#define SLAVE_UFS_MEM_CFG 35
+#define SLAVE_USB3 36
+#define SLAVE_VENUS_CFG 37
+#define SLAVE_VSENSE_CTRL_CFG 38
+#define SLAVE_CNOC_A2NOC 39
+#define SLAVE_SERVICE_CNOC 40
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_MEM_NOC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_GNOC_CFG 1
+#define SLAVE_GNOC_SNOC 2
+#define SLAVE_GNOC_MEM_NOC 3
+#define SLAVE_SERVICE_GNOC 4
+
+#define MASTER_TCU_0 0
+#define MASTER_MEM_NOC_CFG 1
+#define MASTER_GNOC_MEM_NOC 2
+#define MASTER_MNOC_HF_MEM_NOC 3
+#define MASTER_MNOC_SF_MEM_NOC 4
+#define MASTER_SNOC_GC_MEM_NOC 5
+#define MASTER_SNOC_SF_MEM_NOC 6
+#define MASTER_GRAPHICS_3D 7
+#define SLAVE_MSS_PROC_MS_MPU_CFG 8
+#define SLAVE_MEM_NOC_GNOC 9
+#define SLAVE_LLCC 10
+#define SLAVE_MEM_NOC_SNOC 11
+#define SLAVE_SERVICE_MEM_NOC 12
+#define MASTER_LLCC 13
+#define SLAVE_EBI_CH0 14
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP_PORT0 4
+#define MASTER_MDP_PORT1 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_P1 8
+#define MASTER_VIDEO_PROC 9
+#define SLAVE_MNOC_SF_MEM_NOC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_SNOC_CFG 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_GNOC_SNOC 3
+#define MASTER_MEM_NOC_SNOC 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_APPSS 7
+#define SLAVE_SNOC_CNOC 8
+#define SLAVE_SNOC_MEM_NOC_GC 9
+#define SLAVE_SNOC_MEM_NOC_SF 10
+#define SLAVE_OCIMEM 11
+#define SLAVE_PIMEM 12
+#define SLAVE_SERVICE_SNOC 13
+#define SLAVE_QDSS_STM 14
+#define SLAVE_TCU 15
+#define MASTER_CAMNOC_HF0_UNCOMP 16
+#define MASTER_CAMNOC_HF1_UNCOMP 17
+#define MASTER_CAMNOC_SF_UNCOMP 18
+#define SLAVE_CAMNOC_UNCOMP 19
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdx55.h b/include/dt-bindings/interconnect/qcom,sdx55.h
index bfb6524a2d90..1925f0784ab2 100644
--- a/include/dt-bindings/interconnect/qcom,sdx55.h
+++ b/include/dt-bindings/interconnect/qcom,sdx55.h
@@ -70,7 +70,5 @@
#define SLAVE_QDSS_STM 48
#define SLAVE_TCU 49
-#define MASTER_IPA_CORE 0
-#define SLAVE_IPA_CORE 1
#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8150.h b/include/dt-bindings/interconnect/qcom,sm8150.h
index a25684680c42..ef292791f52e 100644
--- a/include/dt-bindings/interconnect/qcom,sm8150.h
+++ b/include/dt-bindings/interconnect/qcom,sm8150.h
@@ -121,9 +121,6 @@
#define SLAVE_LLCC 15
#define SLAVE_SERVICE_GEM_NOC 16
-#define MASTER_IPA_CORE 0
-#define SLAVE_IPA_CORE 1
-
#define MASTER_LLCC 0
#define SLAVE_EBI_CH0 1
diff --git a/include/dt-bindings/interconnect/qcom,sm8250.h b/include/dt-bindings/interconnect/qcom,sm8250.h
index 1b4d9fbe888d..a4af5cc19271 100644
--- a/include/dt-bindings/interconnect/qcom,sm8250.h
+++ b/include/dt-bindings/interconnect/qcom,sm8250.h
@@ -115,9 +115,6 @@
#define SLAVE_SERVICE_GEM_NOC_2 15
#define SLAVE_SERVICE_GEM_NOC 16
-#define MASTER_IPA_CORE 0
-#define SLAVE_IPA_CORE 1
-
#define MASTER_LLCC 0
#define SLAVE_EBI_CH0 1
diff --git a/include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h b/include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h
new file mode 100644
index 000000000000..3865f0139639
--- /dev/null
+++ b/include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_STARFIVE_JH7110_H__
+#define __DT_BINDINGS_PINCTRL_STARFIVE_JH7110_H__
+
+/* sys_iomux pins */
+#define PAD_GPIO0 0
+#define PAD_GPIO1 1
+#define PAD_GPIO2 2
+#define PAD_GPIO3 3
+#define PAD_GPIO4 4
+#define PAD_GPIO5 5
+#define PAD_GPIO6 6
+#define PAD_GPIO7 7
+#define PAD_GPIO8 8
+#define PAD_GPIO9 9
+#define PAD_GPIO10 10
+#define PAD_GPIO11 11
+#define PAD_GPIO12 12
+#define PAD_GPIO13 13
+#define PAD_GPIO14 14
+#define PAD_GPIO15 15
+#define PAD_GPIO16 16
+#define PAD_GPIO17 17
+#define PAD_GPIO18 18
+#define PAD_GPIO19 19
+#define PAD_GPIO20 20
+#define PAD_GPIO21 21
+#define PAD_GPIO22 22
+#define PAD_GPIO23 23
+#define PAD_GPIO24 24
+#define PAD_GPIO25 25
+#define PAD_GPIO26 26
+#define PAD_GPIO27 27
+#define PAD_GPIO28 28
+#define PAD_GPIO29 29
+#define PAD_GPIO30 30
+#define PAD_GPIO31 31
+#define PAD_GPIO32 32
+#define PAD_GPIO33 33
+#define PAD_GPIO34 34
+#define PAD_GPIO35 35
+#define PAD_GPIO36 36
+#define PAD_GPIO37 37
+#define PAD_GPIO38 38
+#define PAD_GPIO39 39
+#define PAD_GPIO40 40
+#define PAD_GPIO41 41
+#define PAD_GPIO42 42
+#define PAD_GPIO43 43
+#define PAD_GPIO44 44
+#define PAD_GPIO45 45
+#define PAD_GPIO46 46
+#define PAD_GPIO47 47
+#define PAD_GPIO48 48
+#define PAD_GPIO49 49
+#define PAD_GPIO50 50
+#define PAD_GPIO51 51
+#define PAD_GPIO52 52
+#define PAD_GPIO53 53
+#define PAD_GPIO54 54
+#define PAD_GPIO55 55
+#define PAD_GPIO56 56
+#define PAD_GPIO57 57
+#define PAD_GPIO58 58
+#define PAD_GPIO59 59
+#define PAD_GPIO60 60
+#define PAD_GPIO61 61
+#define PAD_GPIO62 62
+#define PAD_GPIO63 63
+#define PAD_SD0_CLK 64
+#define PAD_SD0_CMD 65
+#define PAD_SD0_DATA0 66
+#define PAD_SD0_DATA1 67
+#define PAD_SD0_DATA2 68
+#define PAD_SD0_DATA3 69
+#define PAD_SD0_DATA4 70
+#define PAD_SD0_DATA5 71
+#define PAD_SD0_DATA6 72
+#define PAD_SD0_DATA7 73
+#define PAD_SD0_STRB 74
+#define PAD_GMAC1_MDC 75
+#define PAD_GMAC1_MDIO 76
+#define PAD_GMAC1_RXD0 77
+#define PAD_GMAC1_RXD1 78
+#define PAD_GMAC1_RXD2 79
+#define PAD_GMAC1_RXD3 80
+#define PAD_GMAC1_RXDV 81
+#define PAD_GMAC1_RXC 82
+#define PAD_GMAC1_TXD0 83
+#define PAD_GMAC1_TXD1 84
+#define PAD_GMAC1_TXD2 85
+#define PAD_GMAC1_TXD3 86
+#define PAD_GMAC1_TXEN 87
+#define PAD_GMAC1_TXC 88
+#define PAD_QSPI_SCLK 89
+#define PAD_QSPI_CS0 90
+#define PAD_QSPI_DATA0 91
+#define PAD_QSPI_DATA1 92
+#define PAD_QSPI_DATA2 93
+#define PAD_QSPI_DATA3 94
+
+/* aon_iomux pins */
+#define PAD_TESTEN 0
+#define PAD_RGPIO0 1
+#define PAD_RGPIO1 2
+#define PAD_RGPIO2 3
+#define PAD_RGPIO3 4
+#define PAD_RSTN 5
+#define PAD_GMAC0_MDC 6
+#define PAD_GMAC0_MDIO 7
+#define PAD_GMAC0_RXD0 8
+#define PAD_GMAC0_RXD1 9
+#define PAD_GMAC0_RXD2 10
+#define PAD_GMAC0_RXD3 11
+#define PAD_GMAC0_RXDV 12
+#define PAD_GMAC0_RXC 13
+#define PAD_GMAC0_TXD0 14
+#define PAD_GMAC0_TXD1 15
+#define PAD_GMAC0_TXD2 16
+#define PAD_GMAC0_TXD3 17
+#define PAD_GMAC0_TXEN 18
+#define PAD_GMAC0_TXC 19
+
+#define GPOUT_LOW 0
+#define GPOUT_HIGH 1
+
+#define GPOEN_ENABLE 0
+#define GPOEN_DISABLE 1
+
+#define GPI_NONE 255
+
+#endif
diff --git a/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h b/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h
new file mode 100644
index 000000000000..23cfb57256d6
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_
+#define _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_
+
+#define PD_CPU 0
+#define PD_VE 1
+#define PD_DSP 2
+
+#endif /* _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_ */
diff --git a/include/dt-bindings/power/mediatek,mt8188-power.h b/include/dt-bindings/power/mediatek,mt8188-power.h
new file mode 100644
index 000000000000..57e75cf3aa2c
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt8188-power.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8188_POWER_H
+#define _DT_BINDINGS_POWER_MT8188_POWER_H
+
+#define MT8188_POWER_DOMAIN_MFG0 0
+#define MT8188_POWER_DOMAIN_MFG1 1
+#define MT8188_POWER_DOMAIN_MFG2 2
+#define MT8188_POWER_DOMAIN_MFG3 3
+#define MT8188_POWER_DOMAIN_MFG4 4
+#define MT8188_POWER_DOMAIN_PEXTP_MAC_P0 5
+#define MT8188_POWER_DOMAIN_PEXTP_PHY_TOP 6
+#define MT8188_POWER_DOMAIN_CSIRX_TOP 7
+#define MT8188_POWER_DOMAIN_ETHER 8
+#define MT8188_POWER_DOMAIN_HDMI_TX 9
+#define MT8188_POWER_DOMAIN_ADSP_AO 10
+#define MT8188_POWER_DOMAIN_ADSP_INFRA 11
+#define MT8188_POWER_DOMAIN_ADSP 12
+#define MT8188_POWER_DOMAIN_AUDIO 13
+#define MT8188_POWER_DOMAIN_AUDIO_ASRC 14
+#define MT8188_POWER_DOMAIN_VPPSYS0 15
+#define MT8188_POWER_DOMAIN_VDOSYS0 16
+#define MT8188_POWER_DOMAIN_VDOSYS1 17
+#define MT8188_POWER_DOMAIN_DP_TX 18
+#define MT8188_POWER_DOMAIN_EDP_TX 19
+#define MT8188_POWER_DOMAIN_VPPSYS1 20
+#define MT8188_POWER_DOMAIN_WPE 21
+#define MT8188_POWER_DOMAIN_VDEC0 22
+#define MT8188_POWER_DOMAIN_VDEC1 23
+#define MT8188_POWER_DOMAIN_VENC 24
+#define MT8188_POWER_DOMAIN_IMG_VCORE 25
+#define MT8188_POWER_DOMAIN_IMG_MAIN 26
+#define MT8188_POWER_DOMAIN_DIP 27
+#define MT8188_POWER_DOMAIN_IPE 28
+#define MT8188_POWER_DOMAIN_CAM_VCORE 29
+#define MT8188_POWER_DOMAIN_CAM_MAIN 30
+#define MT8188_POWER_DOMAIN_CAM_SUBA 31
+#define MT8188_POWER_DOMAIN_CAM_SUBB 32
+
+#endif /* _DT_BINDINGS_POWER_MT8188_POWER_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 278de6df425e..1bf8e87ecd7e 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -4,6 +4,25 @@
#ifndef _DT_BINDINGS_POWER_QCOM_RPMPD_H
#define _DT_BINDINGS_POWER_QCOM_RPMPD_H
+/* SA8775P Power Domain Indexes */
+#define SA8775P_CX 0
+#define SA8775P_CX_AO 1
+#define SA8775P_DDR 2
+#define SA8775P_EBI 3
+#define SA8775P_GFX 4
+#define SA8775P_LCX 5
+#define SA8775P_LMX 6
+#define SA8775P_MMCX 7
+#define SA8775P_MMCX_AO 8
+#define SA8775P_MSS 9
+#define SA8775P_MX 10
+#define SA8775P_MX_AO 11
+#define SA8775P_MXC 12
+#define SA8775P_MXC_AO 13
+#define SA8775P_NSP0 14
+#define SA8775P_NSP1 15
+#define SA8775P_XO 16
+
/* SDM670 Power Domain Indexes */
#define SDM670_MX 0
#define SDM670_MX_AO 1
@@ -192,6 +211,7 @@
#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56
#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L1 80
#define RPMH_REGULATOR_LEVEL_SVS 128
#define RPMH_REGULATOR_LEVEL_SVS_L0 144
#define RPMH_REGULATOR_LEVEL_SVS_L1 192
@@ -307,16 +327,6 @@
#define SDM660_SSCMX 8
#define SDM660_SSCMX_VFL 9
-/* SM4250 Power Domains */
-#define SM4250_VDDCX 0
-#define SM4250_VDDCX_AO 1
-#define SM4250_VDDCX_VFL 2
-#define SM4250_VDDMX 3
-#define SM4250_VDDMX_AO 4
-#define SM4250_VDDMX_VFL 5
-#define SM4250_VDD_LPI_CX 6
-#define SM4250_VDD_LPI_MX 7
-
/* SM6115 Power Domains */
#define SM6115_VDDCX 0
#define SM6115_VDDCX_AO 1
diff --git a/include/dt-bindings/power/r8a779g0-sysc.h b/include/dt-bindings/power/r8a779g0-sysc.h
index 7daa70f1814e..c7b139fb075f 100644
--- a/include/dt-bindings/power/r8a779g0-sysc.h
+++ b/include/dt-bindings/power/r8a779g0-sysc.h
@@ -38,6 +38,7 @@
#define R8A779G0_PD_A3VIP2 58
#define R8A779G0_PD_A3ISP0 60
#define R8A779G0_PD_A3ISP1 61
+#define R8A779G0_PD_A3DUL 62
/* Always-on power area */
#define R8A779G0_PD_ALWAYS_ON 64
diff --git a/include/dt-bindings/power/starfive,jh7110-pmu.h b/include/dt-bindings/power/starfive,jh7110-pmu.h
new file mode 100644
index 000000000000..132bfe401fc8
--- /dev/null
+++ b/include/dt-bindings/power/starfive,jh7110-pmu.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ * Author: Walker Chen <walker.chen@starfivetech.com>
+ */
+#ifndef __DT_BINDINGS_POWER_JH7110_POWER_H__
+#define __DT_BINDINGS_POWER_JH7110_POWER_H__
+
+#define JH7110_PD_SYSTOP 0
+#define JH7110_PD_CPU 1
+#define JH7110_PD_GPUA 2
+#define JH7110_PD_VDEC 3
+#define JH7110_PD_VOUT 4
+#define JH7110_PD_ISP 5
+#define JH7110_PD_VENC 6
+
+#endif
diff --git a/include/dt-bindings/reset/mt8195-resets.h b/include/dt-bindings/reset/mt8195-resets.h
index 24ab3631dcea..e61660438d61 100644
--- a/include/dt-bindings/reset/mt8195-resets.h
+++ b/include/dt-bindings/reset/mt8195-resets.h
@@ -35,4 +35,49 @@
#define MT8195_INFRA_RST2_PCIE_P1_SWRST 4
#define MT8195_INFRA_RST2_USBSIF_P1_SWRST 5
+/* VDOSYS1 */
+#define MT8195_VDOSYS1_SW0_RST_B_SMI_LARB2 0
+#define MT8195_VDOSYS1_SW0_RST_B_SMI_LARB3 1
+#define MT8195_VDOSYS1_SW0_RST_B_GALS 2
+#define MT8195_VDOSYS1_SW0_RST_B_FAKE_ENG0 3
+#define MT8195_VDOSYS1_SW0_RST_B_FAKE_ENG1 4
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA0 5
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA1 6
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA2 7
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA3 8
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE0 9
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE1 10
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE2 11
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE3 12
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE4 13
+#define MT8195_VDOSYS1_SW0_RST_B_VPP2_TO_VDO1_DL_ASYNC 14
+#define MT8195_VDOSYS1_SW0_RST_B_VPP3_TO_VDO1_DL_ASYNC 15
+#define MT8195_VDOSYS1_SW0_RST_B_DISP_MUTEX 16
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA4 17
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA5 18
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA6 19
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA7 20
+#define MT8195_VDOSYS1_SW0_RST_B_DP_INTF0 21
+#define MT8195_VDOSYS1_SW0_RST_B_DPI0 22
+#define MT8195_VDOSYS1_SW0_RST_B_DPI1 23
+#define MT8195_VDOSYS1_SW0_RST_B_DISP_MONITOR 24
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE0_DL_ASYNC 25
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE1_DL_ASYNC 26
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE2_DL_ASYNC 27
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE3_DL_ASYNC 28
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE4_DL_ASYNC 29
+#define MT8195_VDOSYS1_SW0_RST_B_VDO0_DSC_TO_VDO1_DL_ASYNC 30
+#define MT8195_VDOSYS1_SW0_RST_B_VDO0_MERGE_TO_VDO1_DL_ASYNC 31
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE0 32
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE0 33
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_BE 34
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE1 48
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE1 49
+#define MT8195_VDOSYS1_SW1_RST_B_DISP_MIXER 50
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE0_DL_ASYNC 51
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE1_DL_ASYNC 52
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE0_DL_ASYNC 53
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE1_DL_ASYNC 54
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_BE_DL_ASYNC 55
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8195 */
diff --git a/include/dt-bindings/reset/sun20i-d1-ccu.h b/include/dt-bindings/reset/sun20i-d1-ccu.h
index de9ff5203239..f8001cf50bf1 100644
--- a/include/dt-bindings/reset/sun20i-d1-ccu.h
+++ b/include/dt-bindings/reset/sun20i-d1-ccu.h
@@ -73,5 +73,7 @@
#define RST_BUS_DSP_CFG 63
#define RST_BUS_DSP_DBG 64
#define RST_BUS_RISCV_CFG 65
+#define RST_BUS_CAN0 66
+#define RST_BUS_CAN1 67
#endif /* _DT_BINDINGS_RST_SUN20I_D1_CCU_H_ */
diff --git a/include/kunit/static_stub.h b/include/kunit/static_stub.h
new file mode 100644
index 000000000000..9b80150a5d62
--- /dev/null
+++ b/include/kunit/static_stub.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit function redirection (static stubbing) API.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+#ifndef _KUNIT_STATIC_STUB_H
+#define _KUNIT_STATIC_STUB_H
+
+#if !IS_ENABLED(CONFIG_KUNIT)
+
+/* If CONFIG_KUNIT is not enabled, these stubs quietly disappear. */
+#define KUNIT_STATIC_STUB_REDIRECT(real_fn_name, args...) do {} while (0)
+
+#else
+
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include <linux/compiler.h> /* for {un,}likely() */
+#include <linux/sched.h> /* for task_struct */
+
+
+/**
+ * KUNIT_STATIC_STUB_REDIRECT() - call a replacement 'static stub' if one exists
+ * @real_fn_name: The name of this function (as an identifier, not a string)
+ * @args: All of the arguments passed to this function
+ *
+ * This is a function prologue which is used to allow calls to the current
+ * function to be redirected by a KUnit test. KUnit tests can call
+ * kunit_activate_static_stub() to pass a replacement function in. The
+ * replacement function will be called by KUNIT_STATIC_STUB_REDIRECT(), which
+ * will then return from the function. If the caller is not in a KUnit context,
+ * the function will continue execution as normal.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * int real_func(int n)
+ * {
+ * KUNIT_STATIC_STUB_REDIRECT(real_func, n);
+ * return 0;
+ * }
+ *
+ * int replacement_func(int n)
+ * {
+ * return 42;
+ * }
+ *
+ * void example_test(struct kunit *test)
+ * {
+ * kunit_activate_static_stub(test, real_func, replacement_func);
+ * KUNIT_EXPECT_EQ(test, real_func(1), 42);
+ * }
+ *
+ */
+#define KUNIT_STATIC_STUB_REDIRECT(real_fn_name, args...) \
+do { \
+ typeof(&real_fn_name) replacement; \
+ struct kunit *current_test = kunit_get_current_test(); \
+ \
+ if (likely(!current_test)) \
+ break; \
+ \
+ replacement = kunit_hooks.get_static_stub_address(current_test, \
+ &real_fn_name); \
+ \
+ if (unlikely(replacement)) \
+ return replacement(args); \
+} while (0)
+
+/* Helper function for kunit_activate_static_stub(). The macro does
+ * typechecking, so use it instead.
+ */
+void __kunit_activate_static_stub(struct kunit *test,
+ void *real_fn_addr,
+ void *replacement_addr);
+
+/**
+ * kunit_activate_static_stub() - replace a function using static stubs.
+ * @test: A pointer to the 'struct kunit' test context for the current test.
+ * @real_fn_addr: The address of the function to replace.
+ * @replacement_addr: The address of the function to replace it with.
+ *
+ * When activated, calls to real_fn_addr from within this test (even if called
+ * indirectly) will instead call replacement_addr. The function pointed to by
+ * real_fn_addr must begin with the static stub prologue in
+ * KUNIT_TRIGGER_STATIC_STUB() for this to work. real_fn_addr and
+ * replacement_addr must have the same type.
+ *
+ * The redirection can be disabled again with kunit_deactivate_static_stub().
+ */
+#define kunit_activate_static_stub(test, real_fn_addr, replacement_addr) do { \
+ typecheck_fn(typeof(&real_fn_addr), replacement_addr); \
+ __kunit_activate_static_stub(test, real_fn_addr, replacement_addr); \
+} while (0)
+
+
+/**
+ * kunit_deactivate_static_stub() - disable a function redirection
+ * @test: A pointer to the 'struct kunit' test context for the current test.
+ * @real_fn_addr: The address of the function to no longer redirect
+ *
+ * Deactivates a redirection configured with kunit_activate_static_stub(). After
+ * this function returns, calls to real_fn_addr() will execute the original
+ * real_fn, not any previously-configured replacement.
+ */
+void kunit_deactivate_static_stub(struct kunit *test, void *real_fn_addr);
+
+#endif
+#endif
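
A minimal usage sketch of the API above (hypothetical function names, assuming CONFIG_KUNIT=y):

#include <kunit/static_stub.h>
#include <kunit/test.h>
#include <linux/errno.h>

/* Production-side function carrying the redirect prologue. */
static int read_sensor(int channel)
{
	KUNIT_STATIC_STUB_REDIRECT(read_sensor, channel);
	return -ENODEV;	/* pretend the hardware is absent */
}

static int fake_read_sensor(int channel)
{
	return 42;
}

static void read_sensor_test(struct kunit *test)
{
	kunit_activate_static_stub(test, read_sensor, fake_read_sensor);
	KUNIT_EXPECT_EQ(test, read_sensor(0), 42);

	/* A stub can also be dropped before the test finishes. */
	kunit_deactivate_static_stub(test, read_sensor);
	KUNIT_EXPECT_EQ(test, read_sensor(0), -ENODEV);
}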
diff --git a/include/kunit/test-bug.h b/include/kunit/test-bug.h
index c1b2e14eab64..30ca541b6ff2 100644
--- a/include/kunit/test-bug.h
+++ b/include/kunit/test-bug.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * KUnit API allowing dynamic analysis tools to interact with KUnit tests
+ * KUnit API providing hooks for non-test code to interact with tests.
*
* Copyright (C) 2020, Google LLC.
* Author: Uriel Guajardo <urielguajardo@google.com>
@@ -9,7 +9,7 @@
#ifndef _KUNIT_TEST_BUG_H
#define _KUNIT_TEST_BUG_H
-#if IS_BUILTIN(CONFIG_KUNIT)
+#if IS_ENABLED(CONFIG_KUNIT)
#include <linux/jump_label.h> /* For static branch */
#include <linux/sched.h>
@@ -17,6 +17,12 @@
/* Static key if KUnit is running any tests. */
DECLARE_STATIC_KEY_FALSE(kunit_running);
+/* Hooks table: a table of function pointers filled in when kunit loads */
+extern struct kunit_hooks_table {
+ __printf(3, 4) void (*fail_current_test)(const char*, int, const char*, ...);
+ void *(*get_static_stub_address)(struct kunit *test, void *real_fn_addr);
+} kunit_hooks;
+
/**
* kunit_get_current_test() - Return a pointer to the currently running
* KUnit test.
@@ -43,33 +49,20 @@ static inline struct kunit *kunit_get_current_test(void)
* kunit_fail_current_test() - If a KUnit test is running, fail it.
*
* If a KUnit test is running in the current task, mark that test as failed.
- *
- * This macro will only work if KUnit is built-in (though the tests
- * themselves can be modules). Otherwise, it compiles down to nothing.
*/
#define kunit_fail_current_test(fmt, ...) do { \
if (static_branch_unlikely(&kunit_running)) { \
- __kunit_fail_current_test(__FILE__, __LINE__, \
+ /* Guaranteed to be non-NULL when kunit_running is true */ \
+ kunit_hooks.fail_current_test(__FILE__, __LINE__, \
fmt, ##__VA_ARGS__); \
} \
} while (0)
-
-extern __printf(3, 4) void __kunit_fail_current_test(const char *file, int line,
- const char *fmt, ...);
-
#else
static inline struct kunit *kunit_get_current_test(void) { return NULL; }
-/* We define this with an empty helper function so format string warnings work */
-#define kunit_fail_current_test(fmt, ...) \
- __kunit_fail_current_test(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
-
-static inline __printf(3, 4) void __kunit_fail_current_test(const char *file, int line,
- const char *fmt, ...)
-{
-}
+#define kunit_fail_current_test(fmt, ...) do {} while (0)
#endif
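
A short sketch of how non-test code might use the hook-based call (hypothetical helper; the macro compiles away when CONFIG_KUNIT is disabled):

#include <kunit/test-bug.h>

static void example_check_refcount(int refcount)
{
	if (refcount < 0)
		kunit_fail_current_test("negative refcount: %d", refcount);
}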
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index cd6d8f260eab..71916de7c6c4 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -60,7 +60,7 @@ struct arch_timer_cpu {
bool enabled;
};
-int kvm_timer_hyp_init(bool);
+int __init kvm_timer_hyp_init(bool has_gic);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
@@ -104,4 +104,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);
+/* CPU HP callbacks */
+void kvm_timer_cpu_up(void);
+void kvm_timer_cpu_down(void);
+
#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 6470f67e63c4..d3ad51fde9db 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -432,4 +432,8 @@ int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
+/* CPU HP callbacks */
+void kvm_vgic_cpu_up(void);
+void kvm_vgic_cpu_down(void);
+
#endif /* __KVM_ARM_VGIC_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 5e6a876e17ba..efff750f326d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -723,7 +723,7 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const void *acpi_device_get_match_data(const struct device *dev);
extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
-int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
+int acpi_device_uevent_modalias(const struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
struct platform_device *acpi_create_platform_device(struct acpi_device *,
@@ -950,6 +950,12 @@ static inline bool acpi_driver_match_device(struct device *dev,
return false;
}
+static inline bool acpi_check_dsm(acpi_handle handle, const guid_t *guid,
+ u64 rev, u64 funcs)
+{
+ return false;
+}
+
static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
const guid_t *guid,
u64 rev, u64 func,
@@ -958,7 +964,16 @@ static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
return NULL;
}
-static inline int acpi_device_uevent_modalias(struct device *dev,
+static inline union acpi_object *acpi_evaluate_dsm_typed(acpi_handle handle,
+ const guid_t *guid,
+ u64 rev, u64 func,
+ union acpi_object *argv4,
+ acpi_object_type type)
+{
+ return NULL;
+}
+
+static inline int acpi_device_uevent_modalias(const struct device *dev,
struct kobj_uevent_env *env)
{
return -ENODEV;
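
A hedged sketch of calling the typed _DSM helper (the GUID, revision and function numbers are made up; with ACPI disabled the stub above simply returns NULL):

#include <linux/acpi.h>
#include <linux/errno.h>

static int example_read_dsm(acpi_handle handle, const guid_t *example_guid)
{
	union acpi_object *obj;

	obj = acpi_evaluate_dsm_typed(handle, example_guid, 1, 4, NULL,
				      ACPI_TYPE_PACKAGE);
	if (!obj)
		return -ENODEV;

	/* ... consume obj->package elements here ... */
	ACPI_FREE(obj);
	return 0;
}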
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index d91af50ac58d..c15221dcb75e 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -1,21 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright (c) 2013-2022, Intel Corporation. */
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the drivers for all devices starting from our 40G product line
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+ * from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
@@ -29,8 +21,8 @@
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value
- * is of status_code type, defined in the shared type.h.
+ * except RESET_VF, which does not require any response. The returned value
+ * is of virtchnl_status_code type, defined here.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
@@ -122,9 +114,13 @@ enum virtchnl_ops {
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ /* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
@@ -162,19 +158,6 @@ enum virtchnl_ops {
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum virtchnl_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
-
/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
@@ -235,7 +218,9 @@ enum virtchnl_vsi_type {
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
- enum virtchnl_vsi_type vsi_type;
+
+ /* see enum virtchnl_vsi_type */
+ s32 vsi_type;
u16 qset_handle;
u8 default_mac_addr[ETH_ALEN];
};
@@ -247,7 +232,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
-#define VIRTCHNL_VF_OFFLOAD_IWARP BIT(1)
+#define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
+#define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
@@ -324,7 +310,9 @@ struct virtchnl_rxq_info {
u8 rxdid;
u8 pad1[2];
u64 dma_ring_addr;
- enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+
+ /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+ s32 rx_split_pos;
u32 pad2;
};
@@ -336,6 +324,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
@@ -373,8 +364,13 @@ struct virtchnl_vf_res_request {
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
* PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
*/
struct virtchnl_vector_map {
u16 vsi_id;
@@ -401,6 +397,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
*/
struct virtchnl_queue_select {
u16 vsi_id;
@@ -965,8 +964,12 @@ enum virtchnl_flow_type {
struct virtchnl_filter {
union virtchnl_flow_spec data;
union virtchnl_flow_spec mask;
- enum virtchnl_flow_type flow_type;
- enum virtchnl_action action;
+
+ /* see enum virtchnl_flow_type */
+ s32 flow_type;
+
+ /* see enum virtchnl_action */
+ s32 action;
u32 action_meta;
u8 field_flags;
u8 pad[3];
@@ -994,7 +997,8 @@ enum virtchnl_event_codes {
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
- enum virtchnl_event_codes event;
+ /* see enum virtchnl_event_codes */
+ s32 event;
union {
/* If the PF driver does not support the new speed reporting
* capabilities then use link_event else use link_event_adv to
@@ -1007,6 +1011,7 @@ struct virtchnl_pf_event {
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
+ u8 pad[3];
} link_event;
struct {
/* link_speed provided in Mbps */
@@ -1016,39 +1021,41 @@ struct virtchnl_pf_event {
} link_event_adv;
} event_data;
- int severity;
+ s32 severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
-/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
- * VF uses this message to request PF to map IWARP vectors to IWARP queues.
- * The request for this originates from the VF IWARP driver through
- * a client interface between VF LAN and VF IWARP driver.
+/* used to specify if a ceq_idx or aeq_idx is invalid */
+#define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
+/* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
+ * VF uses this message to request PF to map RDMA vectors to RDMA queues.
+ * The request for this originates from the VF RDMA driver through
+ * a client interface between VF LAN and VF RDMA driver.
* A vector could have an AEQ and CEQ attached to it although
- * there is a single AEQ per VF IWARP instance in which case
- * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
- * There will never be a case where there will be multiple CEQs attached
- * to a single vector.
+ * there is a single AEQ per VF RDMA instance in which case
+ * most vectors will have a VIRTCHNL_RDMA_INVALID_QUEUE_IDX for aeq and valid
+ * idx for ceqs. There will never be a case where there will be multiple CEQs
+ * attached to a single vector.
* PF configures interrupt mapping and returns status.
*/
-struct virtchnl_iwarp_qv_info {
+struct virtchnl_rdma_qv_info {
u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
+ u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
+ u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
u8 itr_idx;
u8 pad[3];
};
-VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
-struct virtchnl_iwarp_qvlist_info {
+struct virtchnl_rdma_qvlist_info {
u32 num_vectors;
- struct virtchnl_iwarp_qv_info qv_info[1];
+ struct virtchnl_rdma_qv_info qv_info[1];
};
-VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_rdma_qvlist_info);
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
@@ -1107,7 +1114,7 @@ enum virtchnl_rss_algorithm {
#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
(((hdr)->type) >> PROTO_HDR_SHIFT)
#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
- ((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
+ ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
(VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
@@ -1203,7 +1210,8 @@ enum virtchnl_proto_hdr_field {
};
struct virtchnl_proto_hdr {
- enum virtchnl_proto_hdr_type type;
+ /* see enum virtchnl_proto_hdr_type */
+ s32 type;
u32 field_selector; /* a bit mask to select field for header type */
u8 buffer[64];
/**
@@ -1233,15 +1241,18 @@ VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
struct virtchnl_rss_cfg {
struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
- enum virtchnl_rss_algorithm rss_algorithm; /* RSS algorithm type */
- u8 reserved[128]; /* reserve for future */
+
+ /* see enum virtchnl_rss_algorithm; rss algorithm type */
+ s32 rss_algorithm;
+ u8 reserved[128]; /* reserve for future */
};
VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
/* action configuration for FDIR */
struct virtchnl_filter_action {
- enum virtchnl_action type;
+ /* see enum virtchnl_action type */
+ s32 type;
union {
/* used for queue and qgroup action */
struct {
@@ -1283,7 +1294,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
/* Status returned to VF after VF requests FDIR commands
* VIRTCHNL_FDIR_SUCCESS
* VF FDIR related request is successfully done by PF
- * The request can be OP_ADD/DEL.
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
*
* VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
* OP_ADD_FDIR_FILTER request is failed due to no Hardware resource.
@@ -1304,6 +1315,10 @@ VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
* VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
* OP_ADD/DEL_FDIR_FILTER request is failed due to timing out
* for programming.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request is failed due to parameter validation, for
+ * example, the VF queried the counter of a rule that has no counter action.
*/
enum virtchnl_fdir_prgm_status {
VIRTCHNL_FDIR_SUCCESS = 0,
@@ -1313,6 +1328,7 @@ enum virtchnl_fdir_prgm_status {
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+ VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
};
/* VIRTCHNL_OP_ADD_FDIR_FILTER
@@ -1329,7 +1345,9 @@ struct virtchnl_fdir_add {
u16 validate_only; /* INPUT */
u32 flow_id; /* OUTPUT */
struct virtchnl_fdir_rule rule_cfg; /* INPUT */
- enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
};
VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
@@ -1342,7 +1360,9 @@ struct virtchnl_fdir_del {
u16 vsi_id; /* INPUT */
u16 pad;
u32 flow_id; /* INPUT */
- enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
@@ -1361,7 +1381,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len = 0;
+ u32 valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
@@ -1436,7 +1456,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
- case VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_RDMA:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
@@ -1446,19 +1466,16 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
else
err_msg_format = true;
break;
- case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_rdma_qvlist_info);
if (msglen >= valid_len) {
- struct virtchnl_iwarp_qvlist_info *qv =
- (struct virtchnl_iwarp_qvlist_info *)msg;
- if (qv->num_vectors == 0) {
- err_msg_format = true;
- break;
- }
+ struct virtchnl_rdma_qvlist_info *qv =
+ (struct virtchnl_rdma_qvlist_info *)msg;
+
valid_len += ((qv->num_vectors - 1) *
- sizeof(struct virtchnl_iwarp_qv_info));
+ sizeof(struct virtchnl_rdma_qv_info));
}
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
@@ -1502,8 +1519,6 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DISABLE_CHANNELS:
break;
case VIRTCHNL_OP_ADD_CLOUD_FILTER:
- valid_len = sizeof(struct virtchnl_filter);
- break;
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index c9be1657f03d..ebfa12f69501 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -115,6 +115,32 @@
((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
})
+#define __BF_CHECK_POW2(n) BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)
+
+/**
+ * FIELD_PREP_CONST() - prepare a constant bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_CONST() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ *
+ * Unlike FIELD_PREP() this is a constant expression and can therefore
+ * be used in initializers. Error checking is less convenient for this
+ * version, and non-constant masks cannot be used.
+ */
+#define FIELD_PREP_CONST(_mask, _val) \
+ ( \
+ /* mask must be non-zero */ \
+ BUILD_BUG_ON_ZERO((_mask) == 0) + \
+ /* check if value fits */ \
+ BUILD_BUG_ON_ZERO(~((_mask) >> __bf_shf(_mask)) & (_val)) + \
+ /* check if mask is contiguous */ \
+ __BF_CHECK_POW2((_mask) + (1ULL << __bf_shf(_mask))) + \
+ /* and create the value */ \
+ (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)) \
+ )
+
/**
* FIELD_GET() - extract a bitfield element
* @_mask: shifted mask defining the field's length and position
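
A sketch of the intended use of FIELD_PREP_CONST() (the register field below is hypothetical):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MODE_MASK	GENMASK(7, 4)
#define EXAMPLE_MODE_FAST	3

/* Valid as an initializer because FIELD_PREP_CONST() is a constant expression. */
static const u32 example_default_ctrl =
	FIELD_PREP_CONST(EXAMPLE_MODE_MASK, EXAMPLE_MODE_FAST) | BIT(0);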
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b9637d63e6f0..41a41561b773 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -556,6 +556,7 @@ struct request_queue {
#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
+#define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
@@ -1253,6 +1254,12 @@ static inline bool bdev_nonrot(struct block_device *bdev)
return blk_queue_nonrot(bdev_get_queue(bdev));
}
+static inline bool bdev_synchronous(struct block_device *bdev)
+{
+ return test_bit(QUEUE_FLAG_SYNCHRONOUS,
+ &bdev_get_queue(bdev)->queue_flags);
+}
+
static inline bool bdev_stable_writes(struct block_device *bdev)
{
return test_bit(QUEUE_FLAG_STABLE_WRITES,
@@ -1397,7 +1404,6 @@ struct block_device_operations {
unsigned int flags);
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, enum req_op);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
unsigned int (*check_events) (struct gendisk *disk,
@@ -1432,10 +1438,6 @@ extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
#define blkdev_compat_ptr_ioctl NULL
#endif
-extern int bdev_read_page(struct block_device *, sector_t, struct page *);
-extern int bdev_write_page(struct block_device *, sector_t, struct page *,
- struct writeback_control *);
-
static inline void blk_wake_io_task(struct task_struct *waiter)
{
/*
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index 1611f9db878e..ca73940e26df 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -59,7 +59,7 @@ struct xbc_node {
/* Maximum size of boot config is 32KB - 1 */
#define XBC_DATA_MAX (XBC_VALUE - 1)
-#define XBC_NODE_MAX 1024
+#define XBC_NODE_MAX 8192
#define XBC_KEYLEN_MAX 256
#define XBC_DEPTH_MAX 16
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 634d37a599fa..520b238abd5a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -28,6 +28,7 @@
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
+#include <linux/memcontrol.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -180,6 +181,10 @@ enum btf_field_type {
BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF,
BPF_LIST_HEAD = (1 << 4),
BPF_LIST_NODE = (1 << 5),
+ BPF_RB_ROOT = (1 << 6),
+ BPF_RB_NODE = (1 << 7),
+ BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
+ BPF_RB_NODE | BPF_RB_ROOT,
};
struct btf_field_kptr {
@@ -189,7 +194,7 @@ struct btf_field_kptr {
u32 btf_id;
};
-struct btf_field_list_head {
+struct btf_field_graph_root {
struct btf *btf;
u32 value_btf_id;
u32 node_offset;
@@ -201,7 +206,7 @@ struct btf_field {
enum btf_field_type type;
union {
struct btf_field_kptr kptr;
- struct btf_field_list_head list_head;
+ struct btf_field_graph_root graph_root;
};
};
@@ -283,6 +288,10 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
return "bpf_list_head";
case BPF_LIST_NODE:
return "bpf_list_node";
+ case BPF_RB_ROOT:
+ return "bpf_rb_root";
+ case BPF_RB_NODE:
+ return "bpf_rb_node";
default:
WARN_ON_ONCE(1);
return "unknown";
@@ -303,6 +312,10 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
return sizeof(struct bpf_list_head);
case BPF_LIST_NODE:
return sizeof(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return sizeof(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return sizeof(struct bpf_rb_node);
default:
WARN_ON_ONCE(1);
return 0;
@@ -323,6 +336,10 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
return __alignof__(struct bpf_list_head);
case BPF_LIST_NODE:
return __alignof__(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return __alignof__(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return __alignof__(struct bpf_rb_node);
default:
WARN_ON_ONCE(1);
return 0;
@@ -346,6 +363,13 @@ static inline void bpf_obj_init(const struct btf_field_offs *foffs, void *obj)
memset(obj + foffs->field_off[i], 0, foffs->field_sz[i]);
}
+/* 'dst' must be a temporary buffer and should not point to memory that is being
+ * used in parallel by a bpf program or bpf syscall, otherwise the access from
+ * the bpf program or bpf syscall may be corrupted by the reinitialization,
+ * leading to weird problems. Even if 'dst' is newly allocated from the bpf
+ * memory allocator, it is still possible for 'dst' to be used in parallel
+ * by a bpf program or bpf syscall.
+ */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
bpf_obj_init(map->field_offs, dst);
@@ -433,6 +457,9 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
void bpf_timer_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock);
+void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
+ struct bpf_spin_lock *spin_lock);
+
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
@@ -575,6 +602,11 @@ enum bpf_type_flag {
/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS),
+ /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
+ * Currently only valid for linked-list and rbtree nodes.
+ */
+ NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS),
+
__BPF_TYPE_FLAG_MAX,
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
@@ -899,8 +931,12 @@ enum bpf_cgroup_storage_type {
/* The argument is a structure. */
#define BTF_FMODEL_STRUCT_ARG BIT(0)
+/* The argument is signed. */
+#define BTF_FMODEL_SIGNED_ARG BIT(1)
+
struct btf_func_model {
u8 ret_size;
+ u8 ret_flags;
u8 nr_args;
u8 arg_size[MAX_BPF_FUNC_ARGS];
u8 arg_flags[MAX_BPF_FUNC_ARGS];
@@ -939,7 +975,13 @@ struct btf_func_model {
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86.
*/
-#define BPF_MAX_TRAMP_LINKS 38
+enum {
+#if defined(__s390x__)
+ BPF_MAX_TRAMP_LINKS = 27,
+#else
+ BPF_MAX_TRAMP_LINKS = 38,
+#endif
+};
struct bpf_tramp_links {
struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
@@ -1261,7 +1303,8 @@ struct bpf_prog_aux {
enum bpf_prog_type saved_dst_prog_type;
enum bpf_attach_type saved_dst_attach_type;
bool verifier_zext; /* Zero extensions has been inserted by verifier. */
- bool offload_requested;
+ bool dev_bound; /* Program is bound to the netdev. */
+ bool offload_requested; /* Program is bound and offloaded to the netdev. */
bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
bool func_proto_unreliable;
bool sleepable;
@@ -1421,7 +1464,8 @@ struct bpf_struct_ops {
const struct bpf_verifier_ops *verifier_ops;
int (*init)(struct btf *btf);
int (*check_member)(const struct btf_type *t,
- const struct btf_member *member);
+ const struct btf_member *member,
+ const struct bpf_prog *prog);
int (*init_member)(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata);
@@ -1472,6 +1516,7 @@ struct bpf_dummy_ops {
int (*test_1)(struct bpf_dummy_ops_state *cb);
int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
char a3, unsigned long a4);
+ int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
};
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
@@ -1523,9 +1568,9 @@ struct bpf_array {
u32 index_mask;
struct bpf_array_aux *aux;
union {
- char value[0] __aligned(8);
- void *ptrs[0] __aligned(8);
- void __percpu *pptrs[0] __aligned(8);
+ DECLARE_FLEX_ARRAY(char, value) __aligned(8);
+ DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
+ DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
};
};
@@ -1833,7 +1878,7 @@ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_free_id(struct bpf_prog *prog);
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map);
struct btf_field *btf_record_find(const struct btf_record *rec,
u32 offset, enum btf_field_type type);
@@ -1873,6 +1918,8 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+ gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
size_t align, gfp_t flags);
#else
@@ -1889,6 +1936,12 @@ bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
return kzalloc(size, flags);
}
+static inline void *
+bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
+{
+ return kvcalloc(n, size, flags);
+}
+
static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
gfp_t flags)
@@ -2186,6 +2239,14 @@ struct bpf_core_ctx {
const struct btf *btf;
};
+bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off);
+
+bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
+ const struct btf *reg_btf, u32 reg_id,
+ const struct btf *arg_btf, u32 arg_id);
+
int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
int relo_idx, void *insn);
@@ -2451,7 +2512,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
int bpf_prog_offload_compile(struct bpf_prog *prog);
-void bpf_prog_offload_destroy(struct bpf_prog *prog);
+void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
struct bpf_prog *prog);
@@ -2479,14 +2540,26 @@ bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
void unpriv_ebpf_notify(int new_state);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
-int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux);
+void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
+void bpf_dev_bound_netdev_unregister(struct net_device *dev);
static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
+ return aux->dev_bound;
+}
+
+static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
+{
return aux->offload_requested;
}
-static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);
+
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
return unlikely(map->ops == &bpf_map_offload_ops);
}
@@ -2507,18 +2580,50 @@ void sock_map_unhash(struct sock *sk);
void sock_map_destroy(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
-static inline int bpf_prog_offload_init(struct bpf_prog *prog,
- union bpf_attr *attr)
+static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux)
{
return -EOPNOTSUPP;
}
-static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
+static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
+ u32 func_id)
+{
+ return NULL;
+}
+
+static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
+ union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
+{
+}
+
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
+{
+ return false;
+}
+
+static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
+{
+ return false;
+}
+
+static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
return false;
}
-static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
return false;
}
@@ -2795,10 +2900,18 @@ struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
#define MAX_BPRINTF_VARARGS 12
+#define MAX_BPRINTF_BUF 1024
+
+struct bpf_bprintf_data {
+ u32 *bin_args;
+ char *buf;
+ bool get_bin_args;
+ bool get_buf;
+};
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
- u32 **bin_buf, u32 num_args);
-void bpf_bprintf_cleanup(void);
+ u32 num_args, struct bpf_bprintf_data *data);
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
/* the implementation of the opaque uapi struct bpf_dynptr */
struct bpf_dynptr_kern {
@@ -2852,4 +2965,11 @@ static inline bool type_is_alloc(u32 type)
return type & MEM_ALLOC;
}
+static inline gfp_t bpf_memcg_flags(gfp_t flags)
+{
+ if (memcg_bpf_enabled())
+ return flags | __GFP_ACCOUNT;
+ return flags;
+}
+
#endif /* _LINUX_BPF_H */
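
A sketch of how an allocation site might use the new helper (the function name is hypothetical; the allocation is charged to the memcg only when BPF memcg accounting is enabled):

#include <linux/bpf.h>
#include <linux/slab.h>

static void *example_alloc_scratch(size_t size)
{
	return kzalloc(size, bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
}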
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 53d175cbaa02..cf1bb1cf4a7b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -43,6 +43,22 @@ enum bpf_reg_liveness {
REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
+/* For every reg representing a map value or allocated object pointer,
+ * we consider the tuple of (ptr, id) for them to be unique in verifier
+ * context and consider them not to alias each other for the purposes of
+ * tracking lock state.
+ */
+struct bpf_active_lock {
+ /* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
+ * there's no active lock held, and other fields have no
+ * meaning. If non-NULL, it indicates that a lock is held and
+ * id member has the reg->id of the register which can be >= 0.
+ */
+ void *ptr;
+ /* This will be reg->id */
+ u32 id;
+};
+
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
@@ -70,7 +86,10 @@ struct bpf_reg_state {
u32 btf_id;
};
- u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ u32 mem_size;
+ u32 dynptr_id; /* for dynptr slices */
+ };
/* For dynptr stack slots */
struct {
@@ -92,6 +111,26 @@ struct bpf_reg_state {
u32 subprogno; /* for PTR_TO_FUNC */
};
+ /* For scalar types (SCALAR_VALUE), this represents our knowledge of
+ * the actual value.
+ * For pointer types, this represents the variable part of the offset
+ * from the pointed-to object, and is shared with all bpf_reg_states
+ * with the same id as us.
+ */
+ struct tnum var_off;
+ /* Used to determine if any memory access using this register will
+ * result in a bad access.
+ * These refer to the same value as var_off, not necessarily the actual
+ * contents of the register.
+ */
+ s64 smin_value; /* minimum possible (s64)value */
+ s64 smax_value; /* maximum possible (s64)value */
+ u64 umin_value; /* minimum possible (u64)value */
+ u64 umax_value; /* maximum possible (u64)value */
+ s32 s32_min_value; /* minimum possible (s32)value */
+ s32 s32_max_value; /* maximum possible (s32)value */
+ u32 u32_min_value; /* minimum possible (u32)value */
+ u32 u32_max_value; /* maximum possible (u32)value */
/* For PTR_TO_PACKET, used to find other pointers with the same variable
* offset, so they can share range knowledge.
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
@@ -144,26 +183,6 @@ struct bpf_reg_state {
* allowed and has the same effect as bpf_sk_release(sk).
*/
u32 ref_obj_id;
- /* For scalar types (SCALAR_VALUE), this represents our knowledge of
- * the actual value.
- * For pointer types, this represents the variable part of the offset
- * from the pointed-to object, and is shared with all bpf_reg_states
- * with the same id as us.
- */
- struct tnum var_off;
- /* Used to determine if any memory access using this register will
- * result in a bad access.
- * These refer to the same value as var_off, not necessarily the actual
- * contents of the register.
- */
- s64 smin_value; /* minimum possible (s64)value */
- s64 smax_value; /* maximum possible (s64)value */
- u64 umin_value; /* minimum possible (u64)value */
- u64 umax_value; /* maximum possible (u64)value */
- s32 s32_min_value; /* minimum possible (s32)value */
- s32 s32_max_value; /* maximum possible (s32)value */
- u32 u32_min_value; /* minimum possible (u32)value */
- u32 u32_max_value; /* maximum possible (u32)value */
/* parentage chain for liveness checking */
struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
@@ -223,11 +242,6 @@ struct bpf_reference_state {
* exiting a callback function.
*/
int callback_ref;
- /* Mark the reference state to release the registers sharing the same id
- * on bpf_spin_unlock (for nodes that we will lose ownership to but are
- * safe to access inside the critical section).
- */
- bool release_on_unlock;
};
/* state of the program:
@@ -328,21 +342,8 @@ struct bpf_verifier_state {
u32 branches;
u32 insn_idx;
u32 curframe;
- /* For every reg representing a map value or allocated object pointer,
- * we consider the tuple of (ptr, id) for them to be unique in verifier
- * context and conside them to not alias each other for the purposes of
- * tracking lock state.
- */
- struct {
- /* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
- * there's no active lock held, and other fields have no
- * meaning. If non-NULL, it indicates that a lock is held and
- * id member has the reg->id of the register which can be >= 0.
- */
- void *ptr;
- /* This will be reg->id */
- u32 id;
- } active_lock;
+
+ struct bpf_active_lock active_lock;
bool speculative;
bool active_rcu_lock;
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 5f628f323442..49e0fe6d8274 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -73,6 +73,14 @@
#define KF_RCU (1 << 7) /* kfunc only takes rcu pointer arguments */
/*
+ * Tag marking a kernel function as a kfunc. This is meant to minimize the
+ * amount of copy-paste that kfunc authors have to include for correctness so
+ * as to avoid issues such as the compiler inlining or eliding either a static
+ * kfunc, or a global kfunc in an LTO build.
+ */
+#define __bpf_kfunc __used noinline
+
+/*
* Return the name of the passed struct, if exists, or halt the build if for
* example the structure gets renamed. In this way, developers have to revisit
* the code using that structure name, and update it accordingly.
@@ -236,6 +244,16 @@ static inline bool btf_type_is_small_int(const struct btf_type *t)
return btf_type_is_int(t) && t->size <= sizeof(u64);
}
+static inline u8 btf_int_encoding(const struct btf_type *t)
+{
+ return BTF_INT_ENCODING(*(u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_signed_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && (btf_int_encoding(t) & BTF_INT_SIGNED);
+}
+
static inline bool btf_type_is_enum(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
@@ -306,11 +324,6 @@ static inline u8 btf_int_offset(const struct btf_type *t)
return BTF_INT_OFFSET(*(u32 *)(t + 1));
}
-static inline u8 btf_int_encoding(const struct btf_type *t)
-{
- return BTF_INT_ENCODING(*(u32 *)(t + 1));
-}
-
static inline bool btf_type_is_scalar(const struct btf_type *t)
{
return btf_type_is_int(t) || btf_type_is_enum(t);
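
A minimal sketch of the new kfunc annotation (the function is hypothetical and its registration via a btf_kfunc_id_set is omitted here):

#include <linux/btf.h>

__bpf_kfunc u64 bpf_example_sum(u64 a, u64 b)
{
	return a + b;
}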
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 33fa5e94aa80..8f14dca5fed7 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -61,7 +61,10 @@ typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
struct buffer_head {
unsigned long b_state; /* buffer state bitmap (see above) */
struct buffer_head *b_this_page;/* circular list of page's buffers */
- struct page *b_page; /* the page this bh is mapped to */
+ union {
+ struct page *b_page; /* the page this bh is mapped to */
+ struct folio *b_folio; /* the folio this bh is mapped to */
+ };
sector_t b_blocknr; /* start block number */
size_t b_size; /* size of mapping */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 00b7a6ae8617..908e19d17f49 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -80,26 +80,31 @@ struct cpu_cacheinfo {
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
int init_cache_level(unsigned int cpu);
+int init_of_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
int cache_setup_acpi(unsigned int cpu);
bool last_level_cache_is_valid(unsigned int cpu);
bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
+int fetch_cache_info(unsigned int cpu);
int detect_cache_attributes(unsigned int cpu);
#ifndef CONFIG_ACPI_PPTT
/*
- * acpi_find_last_cache_level is only called on ACPI enabled
+ * acpi_get_cache_info() is only called on ACPI enabled
* platforms using the PPTT for topology. This means that if
* the platform supports other firmware configuration methods
* we need to stub out the call when ACPI is disabled.
* ACPI enabled platforms not using PPTT won't be making calls
* to this function so we need not worry about them.
*/
-static inline int acpi_find_last_cache_level(unsigned int cpu)
+static inline
+int acpi_get_cache_info(unsigned int cpu,
+ unsigned int *levels, unsigned int *split_levels)
{
- return 0;
+ return -ENOENT;
}
#else
-int acpi_find_last_cache_level(unsigned int cpu);
+int acpi_get_cache_info(unsigned int cpu,
+ unsigned int *levels, unsigned int *split_levels);
#endif
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
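
A hedged sketch of querying the PPTT-provided cache topology with the renamed helper (the caller is hypothetical; without CONFIG_ACPI_PPTT the stub above returns -ENOENT):

#include <linux/cacheinfo.h>
#include <linux/printk.h>

static void example_report_cache_levels(unsigned int cpu)
{
	unsigned int levels = 0, split_levels = 0;

	if (acpi_get_cache_info(cpu, &levels, &split_levels))
		return;	/* no PPTT data; fall back to DT or registers */

	pr_info("cpu%u: %u cache levels, %u split\n", cpu, levels, split_levels);
}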
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
index ef0a77173e3c..9b8a9c39614b 100644
--- a/include/linux/can/bittiming.h
+++ b/include/linux/can/bittiming.h
@@ -116,7 +116,7 @@ struct can_tdc_const {
#ifdef CONFIG_CAN_CALC_BITTIMING
int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc);
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
@@ -124,7 +124,7 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
#else /* !CONFIG_CAN_CALC_BITTIMING */
static inline int
can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
netdev_err(dev, "bit-timing calculation not available\n");
return -EINVAL;
@@ -138,10 +138,16 @@ can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
}
#endif /* CONFIG_CAN_CALC_BITTIMING */
+void can_sjw_set_default(struct can_bittiming *bt);
+
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
+
int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt);
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack);
/*
* can_bit_time() - Duration of one bit
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 03c2a613ad40..0c356a517991 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -15,28 +15,25 @@
#include <uapi/linux/capability.h>
#include <linux/uidgid.h>
+#include <linux/bits.h>
#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
-#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
extern int file_caps_enabled;
-typedef struct kernel_cap_struct {
- __u32 cap[_KERNEL_CAPABILITY_U32S];
-} kernel_cap_t;
+typedef struct { u64 val; } kernel_cap_t;
/* same as vfs_ns_cap_data but in cpu endian and always filled completely */
struct cpu_vfs_cap_data {
__u32 magic_etc;
+ kuid_t rootid;
kernel_cap_t permitted;
kernel_cap_t inheritable;
- kuid_t rootid;
};
#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct))
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
-
struct file;
struct inode;
struct dentry;
@@ -44,16 +41,6 @@ struct task_struct;
struct user_namespace;
struct mnt_idmap;
-extern const kernel_cap_t __cap_empty_set;
-extern const kernel_cap_t __cap_init_eff_set;
-
-/*
- * Internal kernel functions only
- */
-
-#define CAP_FOR_EACH_U32(__capi) \
- for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
-
/*
* CAP_FS_MASK and CAP_NFSD_MASKS:
*
@@ -67,94 +54,52 @@ extern const kernel_cap_t __cap_init_eff_set;
* 2. The security.* and trusted.* xattrs are fs-related MAC permissions
*/
-# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \
- | CAP_TO_MASK(CAP_MKNOD) \
- | CAP_TO_MASK(CAP_DAC_OVERRIDE) \
- | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \
- | CAP_TO_MASK(CAP_FOWNER) \
- | CAP_TO_MASK(CAP_FSETID))
-
-# define CAP_FS_MASK_B1 (CAP_TO_MASK(CAP_MAC_OVERRIDE))
+# define CAP_FS_MASK (BIT_ULL(CAP_CHOWN) \
+ | BIT_ULL(CAP_MKNOD) \
+ | BIT_ULL(CAP_DAC_OVERRIDE) \
+ | BIT_ULL(CAP_DAC_READ_SEARCH) \
+ | BIT_ULL(CAP_FOWNER) \
+ | BIT_ULL(CAP_FSETID) \
+ | BIT_ULL(CAP_MAC_OVERRIDE))
+#define CAP_VALID_MASK (BIT_ULL(CAP_LAST_CAP+1)-1)
-#if _KERNEL_CAPABILITY_U32S != 2
-# error Fix up hand-coded capability macro initializers
-#else /* HAND-CODED capability initializers */
+# define CAP_EMPTY_SET ((kernel_cap_t) { 0 })
+# define CAP_FULL_SET ((kernel_cap_t) { CAP_VALID_MASK })
+# define CAP_FS_SET ((kernel_cap_t) { CAP_FS_MASK | BIT_ULL(CAP_LINUX_IMMUTABLE) })
+# define CAP_NFSD_SET ((kernel_cap_t) { CAP_FS_MASK | BIT_ULL(CAP_SYS_RESOURCE) })
-#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
-#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
+# define cap_clear(c) do { (c).val = 0; } while (0)
-# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
-# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
-# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
- CAP_FS_MASK_B1 } })
-# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_SYS_RESOURCE), \
- CAP_FS_MASK_B1 } })
-
-#endif /* _KERNEL_CAPABILITY_U32S != 2 */
-
-# define cap_clear(c) do { (c) = __cap_empty_set; } while (0)
-
-#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag))
-#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag))
-#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag))
-
-#define CAP_BOP_ALL(c, a, b, OP) \
-do { \
- unsigned __capi; \
- CAP_FOR_EACH_U32(__capi) { \
- c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \
- } \
-} while (0)
-
-#define CAP_UOP_ALL(c, a, OP) \
-do { \
- unsigned __capi; \
- CAP_FOR_EACH_U32(__capi) { \
- c.cap[__capi] = OP a.cap[__capi]; \
- } \
-} while (0)
+#define cap_raise(c, flag) ((c).val |= BIT_ULL(flag))
+#define cap_lower(c, flag) ((c).val &= ~BIT_ULL(flag))
+#define cap_raised(c, flag) (((c).val & BIT_ULL(flag)) != 0)
static inline kernel_cap_t cap_combine(const kernel_cap_t a,
const kernel_cap_t b)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, b, |);
- return dest;
+ return (kernel_cap_t) { a.val | b.val };
}
static inline kernel_cap_t cap_intersect(const kernel_cap_t a,
const kernel_cap_t b)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, b, &);
- return dest;
+ return (kernel_cap_t) { a.val & b.val };
}
static inline kernel_cap_t cap_drop(const kernel_cap_t a,
const kernel_cap_t drop)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, drop, &~);
- return dest;
+ return (kernel_cap_t) { a.val &~ drop.val };
}
-static inline kernel_cap_t cap_invert(const kernel_cap_t c)
+static inline bool cap_isclear(const kernel_cap_t a)
{
- kernel_cap_t dest;
- CAP_UOP_ALL(dest, c, ~);
- return dest;
+ return !a.val;
}
-static inline bool cap_isclear(const kernel_cap_t a)
+static inline bool cap_isidentical(const kernel_cap_t a, const kernel_cap_t b)
{
- unsigned __capi;
- CAP_FOR_EACH_U32(__capi) {
- if (a.cap[__capi] != 0)
- return false;
- }
- return true;
+ return a.val == b.val;
}
/*
@@ -166,39 +111,31 @@ static inline bool cap_isclear(const kernel_cap_t a)
*/
static inline bool cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
- kernel_cap_t dest;
- dest = cap_drop(a, set);
- return cap_isclear(dest);
+ return !(a.val & ~set.val);
}
/* Used to decide between falling back on the old suser() or fsuser(). */
static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
- const kernel_cap_t __cap_fs_set = CAP_FS_SET;
- return cap_drop(a, __cap_fs_set);
+ return cap_drop(a, CAP_FS_SET);
}
static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
- const kernel_cap_t __cap_fs_set = CAP_FS_SET;
- return cap_combine(a,
- cap_intersect(permitted, __cap_fs_set));
+ return cap_combine(a, cap_intersect(permitted, CAP_FS_SET));
}
static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
{
- const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
- return cap_drop(a, __cap_fs_set);
+ return cap_drop(a, CAP_NFSD_SET);
}
static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
- const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
- return cap_combine(a,
- cap_intersect(permitted, __cap_nfsd_set));
+ return cap_combine(a, cap_intersect(permitted, CAP_NFSD_SET));
}
#ifdef CONFIG_MULTIUSER
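As a quick illustration of the new single-u64 representation, here is a minimal sketch (not part of the patch) exercising the helpers above; the capability constants are the standard ones:

static void cap_u64_demo(void)
{
	kernel_cap_t a = { 0 }, b = { 0 };

	cap_raise(a, CAP_NET_ADMIN);	/* a.val |= BIT_ULL(CAP_NET_ADMIN) */
	cap_raise(b, CAP_NET_ADMIN);
	cap_raise(b, CAP_SYS_ADMIN);

	WARN_ON(!cap_issubset(a, b));		/* no bit of a is outside b */
	WARN_ON(!cap_isclear(cap_drop(a, b)));	/* dropping b empties a */
	WARN_ON(cap_isidentical(a, b));		/* b carries one extra bit */
}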
diff --git a/include/linux/console.h b/include/linux/console.h
index 9cea254b34b8..d3195664baa5 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -15,6 +15,7 @@
#define _LINUX_CONSOLE_H_ 1
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/rculist.h>
#include <linux/types.h>
@@ -59,8 +60,9 @@ struct consw {
int (*con_switch)(struct vc_data *vc);
int (*con_blank)(struct vc_data *vc, int blank, int mode_switch);
int (*con_font_set)(struct vc_data *vc, struct console_font *font,
- unsigned int flags);
- int (*con_font_get)(struct vc_data *vc, struct console_font *font);
+ unsigned int vpitch, unsigned int flags);
+ int (*con_font_get)(struct vc_data *vc, struct console_font *font,
+ unsigned int vpitch);
int (*con_font_default)(struct vc_data *vc,
struct console_font *font, char *name);
int (*con_resize)(struct vc_data *vc, unsigned int width,
@@ -125,37 +127,82 @@ static inline int con_debug_leave(void)
/*
* The interface for a console, or any other device that wants to capture
* console messages (printer driver?)
- *
- * If a console driver is marked CON_BOOT then it will be auto-unregistered
- * when the first real console is registered. This is for early-printk drivers.
*/
-#define CON_PRINTBUFFER (1)
-#define CON_CONSDEV (2) /* Preferred console, /dev/console */
-#define CON_ENABLED (4)
-#define CON_BOOT (8)
-#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
-#define CON_BRL (32) /* Used for a braille device */
-#define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
+/**
+ * cons_flags - General console flags
+ * @CON_PRINTBUFFER: Used by newly registered consoles to avoid duplicate
+ * output of messages that were already shown by boot
+ * consoles or read by userspace via syslog() syscall.
+ * @CON_CONSDEV: Indicates that the console driver is backing
+ * /dev/console.
+ * @CON_ENABLED: Indicates if a console is allowed to print records. If
+ * false, the console also will not advance to later
+ * records.
+ * @CON_BOOT: Marks the console driver as early console driver which
+ * is used during boot before the real driver becomes
+ * available. It will be automatically unregistered
+ * when the real console driver is registered unless
+ * "keep_bootcon" parameter is used.
+ * @CON_ANYTIME: A misnomed historical flag which tells the core code
+ * that the legacy @console::write callback can be invoked
+ * on a CPU which is marked OFFLINE. That is misleading as
+ * it suggests that there is no contextual limit for
+ * invoking the callback. The original motivation was
+ * readiness of the per-CPU areas.
+ * @CON_BRL: Indicates a braille device which is exempt from
+ * receiving the printk spam for obvious reasons.
+ * @CON_EXTENDED: The console supports the extended output format of
+ *			/dev/kmsg which requires a larger output buffer.
+ */
+enum cons_flags {
+ CON_PRINTBUFFER = BIT(0),
+ CON_CONSDEV = BIT(1),
+ CON_ENABLED = BIT(2),
+ CON_BOOT = BIT(3),
+ CON_ANYTIME = BIT(4),
+ CON_BRL = BIT(5),
+ CON_EXTENDED = BIT(6),
+};
+/**
+ * struct console - The console descriptor structure
+ * @name: The name of the console driver
+ * @write: Write callback to output messages (Optional)
+ * @read: Read callback for console input (Optional)
+ * @device: The underlying TTY device driver (Optional)
+ * @unblank: Callback to unblank the console (Optional)
+ * @setup: Callback for initializing the console (Optional)
+ * @exit: Callback for teardown of the console (Optional)
+ * @match: Callback for matching a console (Optional)
+ * @flags: Console flags. See enum cons_flags
+ * @index: Console index, e.g. port number
+ * @cflag: TTY control mode flags
+ * @ispeed: TTY input speed
+ * @ospeed: TTY output speed
+ * @seq: Sequence number of the next ringbuffer record to print
+ * @dropped: Number of unreported dropped ringbuffer records
+ * @data: Driver private data
+ * @node: hlist node for the console list
+ */
struct console {
- char name[16];
- void (*write)(struct console *, const char *, unsigned);
- int (*read)(struct console *, char *, unsigned);
- struct tty_driver *(*device)(struct console *, int *);
- void (*unblank)(void);
- int (*setup)(struct console *, char *);
- int (*exit)(struct console *);
- int (*match)(struct console *, char *name, int idx, char *options);
- short flags;
- short index;
- int cflag;
- uint ispeed;
- uint ospeed;
- u64 seq;
- unsigned long dropped;
- void *data;
- struct hlist_node node;
+ char name[16];
+ void (*write)(struct console *co, const char *s, unsigned int count);
+ int (*read)(struct console *co, char *s, unsigned int count);
+ struct tty_driver *(*device)(struct console *co, int *index);
+ void (*unblank)(void);
+ int (*setup)(struct console *co, char *options);
+ int (*exit)(struct console *co);
+ int (*match)(struct console *co, char *name, int idx, char *options);
+ short flags;
+ short index;
+ int cflag;
+ uint ispeed;
+ uint ospeed;
+ u64 seq;
+ unsigned long dropped;
+ void *data;
+ struct hlist_node node;
};
#ifdef CONFIG_LOCKDEP
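For illustration only, a hedged sketch of a driver-side boot console built from the fields and flags documented above; the write routine is a stub and the driver name is invented:

static void early_serial_write(struct console *co, const char *s,
			       unsigned int count)
{
	/* push 'count' bytes from 's' to the hardware FIFO here */
}

static struct console early_serial_console = {
	.name	= "earlyser",
	.write	= early_serial_write,
	.flags	= CON_PRINTBUFFER | CON_BOOT,	/* replay the log, auto-unregister later */
	.index	= -1,
};

/* registered early via register_console(&early_serial_console) */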
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index 1518568aaf0f..539f1cd45309 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -18,7 +18,6 @@
#include <linux/workqueue.h>
struct uni_pagedict;
-struct uni_screen;
#define NPAR 16
#define VC_TABSTOPS_COUNT 256U
@@ -159,7 +158,7 @@ struct vc_data {
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
struct uni_pagedict *uni_pagedict;
struct uni_pagedict **uni_pagedict_loc; /* [!] Location of uni_pagedict variable for this console */
- struct uni_screen *vc_uni_screen; /* unicode screen content */
+ u32 **vc_uni_lines; /* unicode screen content */
/* additional information is in vt_kern.h */
};
diff --git a/include/linux/container_of.h b/include/linux/container_of.h
index 1d898f9158b4..713890c867be 100644
--- a/include/linux/container_of.h
+++ b/include/linux/container_of.h
@@ -3,7 +3,7 @@
#define _LINUX_CONTAINER_OF_H
#include <linux/build_bug.h>
-#include <linux/err.h>
+#include <linux/stddef.h>
#define typeof_member(T, m) typeof(((T*)0)->m)
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 6c2fd6cc5a98..51ac441a37c3 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -7,8 +7,19 @@
#ifndef _LINUX_CORESIGHT_PMU_H
#define _LINUX_CORESIGHT_PMU_H
+#include <linux/bits.h>
+
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
-#define CORESIGHT_ETM_PMU_SEED 0x10
+
+/*
+ * The legacy Trace ID system based on fixed calculation from the cpu
+ * number. This has been replaced by drivers using a dynamic allocation
+ * system - but need to retain the legacy algorithm for backward compatibility
+ * in certain situations:-
+ * a) new perf running on older systems that generate the legacy mapping
+ * b) older tools that may not update at the same time as the kernel.
+ */
+#define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2))
/*
* Below are the definition of bit offsets for perf option, and works as
@@ -34,15 +45,16 @@
#define ETM4_CFG_BIT_RETSTK 12
#define ETM4_CFG_BIT_VMID_OPT 15
-static inline int coresight_get_trace_id(int cpu)
-{
- /*
- * A trace ID of value 0 is invalid, so let's start at some
- * random value that fits in 7 bits and go from there. Since
- * the common convention is to have data trace IDs be I(N) + 1,
- * set instruction trace IDs as a function of the CPU number.
- */
- return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
-}
+/*
+ * Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
+ * Used to associate a CPU with the CoreSight Trace ID.
+ * [07:00] - Trace ID - uses 8 bits to make the value easy to read in the file.
+ * [59:08] - Unused (SBZ)
+ * [63:60] - Version
+ */
+#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
+#define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60)
+
+#define CS_AUX_HW_ID_CURR_VERSION 0
#endif
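A short, hypothetical decoder for the AUX_OUTPUT_HW_ID payload layout described above; the function name and pr_debug output are illustrative, and <linux/bitfield.h> is assumed for FIELD_GET():

#include <linux/bitfield.h>

static void cs_decode_hw_id(u64 payload)
{
	u8 trace_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, payload);
	u8 version  = FIELD_GET(CS_AUX_HW_ID_VERSION_MASK, payload);

	if (version == CS_AUX_HW_ID_CURR_VERSION)
		pr_debug("trace ID %u (legacy ID for CPU 0 would be %d)\n",
			 trace_id, CORESIGHT_LEGACY_CPU_TRACE_ID(0));
}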
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 1554021231f9..f19a47b9bb5a 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -61,6 +61,7 @@ enum coresight_dev_subtype_source {
CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS,
};
enum coresight_dev_subtype_helper {
@@ -314,14 +315,11 @@ struct coresight_ops_link {
* Operations available for sources.
* @cpu_id: returns the value of the CPU number this component
* is associated to.
- * @trace_id: returns the value of the component's trace ID as known
- * to the HW.
* @enable: enables tracing for a source.
* @disable: disables tracing for a source.
*/
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
- int (*trace_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev,
struct perf_event *event, u32 mode);
void (*disable)(struct coresight_device *csdev,
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 6c6859bfc454..c6fab004104a 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -187,10 +187,6 @@ enum cpuhp_state {
CPUHP_AP_CSKY_TIMER_STARTING,
CPUHP_AP_TI_GP_TIMER_STARTING,
CPUHP_AP_HYPERV_TIMER_STARTING,
- CPUHP_AP_KVM_STARTING,
- CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
- CPUHP_AP_KVM_ARM_VGIC_STARTING,
- CPUHP_AP_KVM_ARM_TIMER_STARTING,
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
@@ -205,6 +201,7 @@ enum cpuhp_state {
/* Online section invoked on the hotplugged CPU from the hotplug thread */
CPUHP_AP_ONLINE_IDLE,
+ CPUHP_AP_KVM_ONLINE,
CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
CPUHP_AP_X86_VDSO_VMA_ONLINE,
@@ -221,6 +218,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
CPUHP_AP_PERF_X86_IDXD_ONLINE,
+ CPUHP_AP_PERF_X86_IOMMU_PERF_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d45e5de13721..10c92bd9b807 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -391,6 +391,26 @@ unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
nr_cpumask_bits, cpumask_check(cpu));
}
+/**
+ * cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @srcp3: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static __always_inline
+unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2,
+ const struct cpumask *srcp3)
+{
+ return find_nth_and_andnot_bit(cpumask_bits(srcp1),
+ cpumask_bits(srcp2),
+ cpumask_bits(srcp3),
+ nr_cpumask_bits, cpumask_check(cpu));
+}
+
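A usage sketch, assuming caller-provided masks 'hk_mask' and 'busy', an index 'n', and a work item 'my_work'; it picks the n-th CPU set in both of the first two masks and clear in the third:

unsigned int cpu = cpumask_nth_and_andnot(n, hk_mask, cpu_online_mask, busy);

if (cpu < nr_cpu_ids)
	/* cpu is the n-th CPU in hk_mask & cpu_online_mask & ~busy */
	queue_work_on(cpu, system_wq, &my_work);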
#define CPU_BITS_NONE \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 5d1e961f810e..bb1d9b0e1647 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -176,7 +176,7 @@ struct crypto_async_request;
struct crypto_tfm;
struct crypto_type;
-typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+typedef void (*crypto_completion_t)(void *req, int err);
/**
* DOC: Block Cipher Context Data Structures
@@ -595,7 +595,7 @@ struct crypto_wait {
/*
 * Async ops completion helper functions
*/
-void crypto_req_done(struct crypto_async_request *req, int err);
+void crypto_req_done(void *req, int err);
static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
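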
diff --git a/include/linux/cxl_err.h b/include/linux/cxl_err.h
deleted file mode 100644
index 629e1bdeda44..000000000000
--- a/include/linux/cxl_err.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2022 Advanced Micro Devices, Inc.
- *
- * Author: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
- */
-
-#ifndef LINUX_CXL_ERR_H
-#define LINUX_CXL_ERR_H
-
-/* CXL RAS Capability Structure, CXL v3.1 sec 8.2.4.16 */
-struct cxl_ras_capability_regs {
- u32 uncor_status;
- u32 uncor_mask;
- u32 uncor_severity;
- u32 cor_status;
- u32 cor_mask;
- u32 cap_control;
- u32 header_log[16];
-};
-
-#endif //__CXL_ERR_
diff --git a/include/linux/damon.h b/include/linux/damon.h
index ad15a5b88e3a..d5d4d19928e0 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -8,6 +8,7 @@
#ifndef _DAMON_H_
#define _DAMON_H_
+#include <linux/memcontrol.h>
#include <linux/mutex.h>
#include <linux/time64.h>
#include <linux/types.h>
@@ -90,6 +91,12 @@ struct damon_target {
* @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists.
* @DAMOS_STAT: Do nothing but count the stat.
* @NR_DAMOS_ACTIONS: Total number of DAMOS actions
+ *
+ * The support of each action is up to the running &struct damon_operations.
+ * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR support all actions except
+ * &enum DAMOS_LRU_PRIO and &enum DAMOS_LRU_DEPRIO. &enum DAMON_OPS_PADDR
+ * supports only &enum DAMOS_PAGEOUT, &enum DAMOS_LRU_PRIO, &enum
+ * DAMOS_LRU_DEPRIO, and &enum DAMOS_STAT.
*/
enum damos_action {
DAMOS_WILLNEED,
@@ -216,6 +223,44 @@ struct damos_stat {
};
/**
+ * enum damos_filter_type - Type of memory for &struct damos_filter
+ * @DAMOS_FILTER_TYPE_ANON: Anonymous pages.
+ * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages.
+ * @NR_DAMOS_FILTER_TYPES: Number of filter types.
+ *
+ * The support of each filter type is up to the running &struct damon_operations.
+ * &enum DAMON_OPS_PADDR supports all filter types, while
+ * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR do not support any
+ * filter types.
+ */
+enum damos_filter_type {
+ DAMOS_FILTER_TYPE_ANON,
+ DAMOS_FILTER_TYPE_MEMCG,
+ NR_DAMOS_FILTER_TYPES,
+};
+
+/**
+ * struct damos_filter - DAMOS action target memory filter.
+ * @type: Type of the page.
+ * @matching: If the matching pages should be filtered out or in.
+ * @memcg_id: Memcg id to match if @type is DAMOS_FILTER_TYPE_MEMCG.
+ * @list: List head for siblings.
+ *
+ * Before applying the &damos->action to a memory region, DAMOS checks if each
+ * page of the region matches this filter and avoids applying the action if so.
+ * Note that support for the check is up to the &struct damon_operations
+ * implementation.
+ */
+struct damos_filter {
+ enum damos_filter_type type;
+ bool matching;
+ union {
+ unsigned short memcg_id;
+ };
+ struct list_head list;
+};
+
+/**
* struct damos_access_pattern - Target access pattern of the given scheme.
* @min_sz_region: Minimum size of target regions.
* @max_sz_region: Maximum size of target regions.
@@ -239,6 +284,7 @@ struct damos_access_pattern {
 * @action: &damos_action to be applied to the target regions.
* @quota: Control the aggressiveness of this scheme.
* @wmarks: Watermarks for automated (in)activation of this scheme.
+ * @filters: Additional set of &struct damos_filter for &action.
* @stat: Statistics of this scheme.
* @list: List head for siblings.
*
@@ -254,6 +300,10 @@ struct damos_access_pattern {
* If all schemes that registered to a &struct damon_ctx are inactive, DAMON
* stops monitoring and just repeatedly checks the watermarks.
*
+ * Before applying the &action to a memory region, &struct damon_operations
+ * implementation could check the pages of the region and skip &action to
+ * respect &filters.
+ *
 * After applying the &action to each region, &stat_count and &stat_sz are
* updated to reflect the number of regions and total size of regions that the
* &action is applied.
@@ -263,6 +313,7 @@ struct damos {
enum damos_action action;
struct damos_quota quota;
struct damos_watermarks wmarks;
+ struct list_head filters;
struct damos_stat stat;
struct list_head list;
};
@@ -303,10 +354,10 @@ struct damon_ctx;
* users should register the low level operations for their target address
* space and usecase via the &damon_ctx.ops. Then, the monitoring thread
* (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
- * the monitoring, @update after each &damon_ctx.ops_update_interval, and
+ * the monitoring, @update after each &damon_attrs.ops_update_interval, and
* @check_accesses, @target_valid and @prepare_access_checks after each
- * &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each
- * &damon_ctx.aggr_interval.
+ * &damon_attrs.sample_interval. Finally, @reset_aggregated is called after
+ * each &damon_attrs.aggr_interval.
*
* Each &struct damon_operations instance having valid @id can be registered
* via damon_register_ops() and selected by damon_select_ops() later.
@@ -516,6 +567,12 @@ static inline unsigned long damon_sz_region(struct damon_region *r)
#define damon_for_each_scheme_safe(s, next, ctx) \
list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
+#define damos_for_each_filter(f, scheme) \
+ list_for_each_entry(f, &(scheme)->filters, list)
+
+#define damos_for_each_filter_safe(f, next, scheme) \
+ list_for_each_entry_safe(f, next, &(scheme)->filters, list)
+
#ifdef CONFIG_DAMON
struct damon_region *damon_new_region(unsigned long start, unsigned long end);
@@ -536,6 +593,11 @@ void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
unsigned int nr_ranges);
+struct damos_filter *damos_new_filter(enum damos_filter_type type,
+ bool matching);
+void damos_add_filter(struct damos *s, struct damos_filter *f);
+void damos_destroy_filter(struct damos_filter *f);
+
struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
enum damos_action action, struct damos_quota *quota,
struct damos_watermarks *wmarks);
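A hedged sketch of how a caller might use the new filter interface declared above; the wrapper function is invented and 'scheme' is an already-created struct damos:

static int damos_add_anon_filter(struct damos *scheme)
{
	struct damos_filter *filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);

	if (!filter)
		return -ENOMEM;
	/* anonymous pages now match the filter and are skipped by the action */
	damos_add_filter(scheme, filter);
	return 0;
}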
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 2b5ecb591059..bf6258472e49 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -262,11 +262,14 @@ static inline bool dax_mapping(struct address_space *mapping)
}
#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
-void hmem_register_device(int target_nid, struct resource *r);
+void hmem_register_resource(int target_nid, struct resource *r);
#else
-static inline void hmem_register_device(int target_nid, struct resource *r)
+static inline void hmem_register_resource(int target_nid, struct resource *r)
{
}
#endif
+typedef int (*walk_hmem_fn)(struct device *dev, int target_nid,
+ const struct resource *res);
+int walk_hmem_resources(struct device *dev, walk_hmem_fn fn);
#endif
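A sketch of a walk_hmem_fn callback as it might be passed to walk_hmem_resources(); the callback name is invented and the stop-on-non-zero behaviour is an assumption:

static int my_hmem_walk(struct device *host, int target_nid,
			const struct resource *res)
{
	dev_dbg(host, "hmem range %pR targets node %d\n", res, target_nid);
	return 0;	/* assumed: a non-zero return would stop the walk */
}

/* walk_hmem_resources(host_dev, my_hmem_walk); */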
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 4dc7cda4fd46..7fd704bb8f3d 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -273,8 +273,8 @@ void devm_devfreq_unregister_notifier(struct device *dev,
struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node);
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
const char *phandle_name, int index);
+#endif /* CONFIG_PM_DEVFREQ */
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
* struct devfreq_simple_ondemand_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
@@ -292,9 +292,7 @@ struct devfreq_simple_ondemand_data {
unsigned int upthreshold;
unsigned int downdifferential;
};
-#endif
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
enum devfreq_parent_dev_type {
DEVFREQ_PARENT_DEV,
CPUFREQ_PARENT_DEV,
@@ -337,9 +335,8 @@ struct devfreq_passive_data {
struct notifier_block nb;
struct list_head cpu_data_list;
};
-#endif
-#else /* !CONFIG_PM_DEVFREQ */
+#if !defined(CONFIG_PM_DEVFREQ)
static inline struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
const char *governor_name,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 04c6acf7faaa..7975483816e4 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -87,10 +88,10 @@ typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
- unsigned status_flags, char *result, unsigned maxlen);
+ unsigned int status_flags, char *result, unsigned int maxlen);
-typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen);
+typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen);
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
@@ -187,7 +188,7 @@ struct target_type {
uint64_t features;
const char *name;
struct module *module;
- unsigned version[3];
+ unsigned int version[3];
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
@@ -313,31 +314,31 @@ struct dm_target {
* It is a responsibility of the target driver to remap these bios
* to the real underlying devices.
*/
- unsigned num_flush_bios;
+ unsigned int num_flush_bios;
/*
* The number of discard bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_discard_bios;
+ unsigned int num_discard_bios;
/*
* The number of secure erase bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_secure_erase_bios;
+ unsigned int num_secure_erase_bios;
/*
* The number of WRITE ZEROES bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_write_zeroes_bios;
+ unsigned int num_write_zeroes_bios;
/*
* The minimum number of extra bytes allocated in each io for the
* target to use.
*/
- unsigned per_io_data_size;
+ unsigned int per_io_data_size;
/* target specific data */
void *private;
@@ -383,7 +384,7 @@ struct dm_target {
void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
-unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
+unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
u64 dm_start_time_ns_from_clone(struct bio *bio);
@@ -394,7 +395,7 @@ void dm_unregister_target(struct target_type *t);
* Target argument parsing.
*/
struct dm_arg_set {
- unsigned argc;
+ unsigned int argc;
char **argv;
};
@@ -403,8 +404,8 @@ struct dm_arg_set {
* the error message to use if the number is found to be outside that range.
*/
struct dm_arg {
- unsigned min;
- unsigned max;
+ unsigned int min;
+ unsigned int max;
char *error;
};
@@ -413,7 +414,7 @@ struct dm_arg {
* returning -EINVAL and setting *error.
*/
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *value, char **error);
+ unsigned int *value, char **error);
/*
* Process the next argument as the start of a group containing between
@@ -421,7 +422,7 @@ int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
* *num_args or, if invalid, return -EINVAL and set *error.
*/
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *num_args, char **error);
+ unsigned int *num_args, char **error);
/*
* Return the current argument and shift to the next.
@@ -431,12 +432,14 @@ const char *dm_shift_arg(struct dm_arg_set *as);
/*
* Move through num_args arguments.
*/
-void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
+void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Functions for creating and manipulating mapped devices.
* Drop the reference with dm_put when you finish with the object.
- *---------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
/*
* DM_ANY_MINOR chooses the next available minor number.
@@ -461,7 +464,7 @@ void *dm_get_mdptr(struct mapped_device *md);
/*
* A device can still be used while suspended, but I/O is deferred.
*/
-int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
+int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);
/*
@@ -481,7 +484,7 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
+void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);
@@ -517,15 +520,17 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Functions for manipulating device-mapper tables.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* First create an empty table.
*/
int dm_table_create(struct dm_table **result, fmode_t mode,
- unsigned num_targets, struct mapped_device *md);
+ unsigned int num_targets, struct mapped_device *md);
/*
* Then call this once for each target.
@@ -593,9 +598,11 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
*/
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Macros.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
#define DM_NAME "device-mapper"
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
@@ -612,8 +619,7 @@ void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
- 0 : scnprintf(result + sz, maxlen - sz, x))
+#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))
#define DMEMIT_TARGET_NAME_VERSION(y) \
DMEMIT("target_name=%s,target_version=%u.%u.%u", \
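For context, a hedged sketch of target-constructor argument parsing with the dm_arg helpers retyped above; the bounds and error string are illustrative:

static int parse_features(struct dm_arg_set *as, struct dm_target *ti)
{
	static const struct dm_arg _args[] = {
		{ 0, 16, "invalid number of feature arguments" },
	};
	unsigned int nr_features;
	int r;

	r = dm_read_arg_group(_args, as, &nr_features, &ti->error);
	if (r)
		return r;

	while (nr_features--)
		dm_shift_arg(as);	/* a real target would interpret each argument */
	return 0;
}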
diff --git a/include/linux/device.h b/include/linux/device.h
index 44e3acae7b36..1508e637bb26 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -88,8 +88,8 @@ int subsys_virtual_register(struct bus_type *subsys,
struct device_type {
const char *name;
const struct attribute_group **groups;
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, umode_t *mode,
+ int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid);
void (*release)(struct device *dev);
@@ -328,6 +328,7 @@ enum device_link_state {
#define DL_FLAG_MANAGED BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
#define DL_FLAG_INFERRED BIT(8)
+#define DL_FLAG_CYCLE BIT(9)
/**
* enum dl_dev_state - Device driver presence tracking information.
@@ -907,8 +908,6 @@ int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
-const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
- kgid_t *gid, const char **tmp);
int device_is_dependent(struct device *dev, void *target);
static inline bool device_supports_offline(struct device *dev)
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index d8b29ccd07e5..6ce32ef4b8fd 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -66,8 +66,6 @@ struct fwnode_handle;
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
* driver implementations to a bus and allow the driver to do
* bus-specific setup
- * @p: The private data of the driver core, only the driver core can
- * touch this.
* @lock_key: Lock class key for use by the lock validator
* @need_parent_lock: When probing or removing a device on this bus, the
* device core should lock the device's parent.
@@ -90,7 +88,7 @@ struct bus_type {
const struct attribute_group **drv_groups;
int (*match)(struct device *dev, struct device_driver *drv);
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+ int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
void (*sync_state)(struct device *dev);
void (*remove)(struct device *dev);
@@ -111,15 +109,12 @@ struct bus_type {
const struct iommu_ops *iommu_ops;
- struct subsys_private *p;
- struct lock_class_key lock_key;
-
bool need_parent_lock;
};
extern int __must_check bus_register(struct bus_type *bus);
-extern void bus_unregister(struct bus_type *bus);
+extern void bus_unregister(const struct bus_type *bus);
extern int __must_check bus_rescan_devices(struct bus_type *bus);
@@ -136,9 +131,8 @@ struct bus_attribute {
#define BUS_ATTR_WO(_name) \
struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
-extern int __must_check bus_create_file(struct bus_type *,
- struct bus_attribute *);
-extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
+int __must_check bus_create_file(const struct bus_type *bus, struct bus_attribute *attr);
+void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr);
/* Generic device matching functions that all busses can use to match with */
int device_match_name(struct device *dev, const void *name);
@@ -150,20 +144,9 @@ int device_match_acpi_handle(struct device *dev, const void *handle);
int device_match_any(struct device *dev, const void *unused);
/* iterator helpers for buses */
-struct subsys_dev_iter {
- struct klist_iter ki;
- const struct device_type *type;
-};
-void subsys_dev_iter_init(struct subsys_dev_iter *iter,
- struct bus_type *subsys,
- struct device *start,
- const struct device_type *type);
-struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
-void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
-
-int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
+int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
-struct device *bus_find_device(struct bus_type *bus, struct device *start,
+struct device *bus_find_device(const struct bus_type *bus, struct device *start,
const void *data,
int (*match)(struct device *dev, const void *data));
/**
@@ -173,7 +156,7 @@ struct device *bus_find_device(struct bus_type *bus, struct device *start,
* @start: Device to begin with
* @name: name of the device to match
*/
-static inline struct device *bus_find_device_by_name(struct bus_type *bus,
+static inline struct device *bus_find_device_by_name(const struct bus_type *bus,
struct device *start,
const char *name)
{
@@ -187,7 +170,7 @@ static inline struct device *bus_find_device_by_name(struct bus_type *bus,
* @np: of_node of the device to match.
*/
static inline struct device *
-bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
+bus_find_device_by_of_node(const struct bus_type *bus, const struct device_node *np)
{
return bus_find_device(bus, NULL, np, device_match_of_node);
}
@@ -199,7 +182,7 @@ bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
* @fwnode: fwnode of the device to match.
*/
static inline struct device *
-bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode)
+bus_find_device_by_fwnode(const struct bus_type *bus, const struct fwnode_handle *fwnode)
{
return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
}
@@ -210,7 +193,7 @@ bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwno
* @bus: bus type
* @devt: device type of the device to match.
*/
-static inline struct device *bus_find_device_by_devt(struct bus_type *bus,
+static inline struct device *bus_find_device_by_devt(const struct bus_type *bus,
dev_t devt)
{
return bus_find_device(bus, NULL, &devt, device_match_devt);
@@ -223,7 +206,7 @@ static inline struct device *bus_find_device_by_devt(struct bus_type *bus,
* @cur: device to begin the search with.
*/
static inline struct device *
-bus_find_next_device(struct bus_type *bus,struct device *cur)
+bus_find_next_device(const struct bus_type *bus,struct device *cur)
{
return bus_find_device(bus, cur, NULL, device_match_any);
}
@@ -238,21 +221,19 @@ struct acpi_device;
* @adev: ACPI COMPANION device to match.
*/
static inline struct device *
-bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev)
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const struct acpi_device *adev)
{
return bus_find_device(bus, NULL, adev, device_match_acpi_dev);
}
#else
static inline struct device *
-bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev)
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev)
{
return NULL;
}
#endif
-struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
- struct device *hint);
-int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *));
void bus_sort_breadthfirst(struct bus_type *bus,
int (*compare)(const struct device *a,
@@ -265,28 +246,42 @@ void bus_sort_breadthfirst(struct bus_type *bus,
*/
struct notifier_block;
-extern int bus_register_notifier(struct bus_type *bus,
+extern int bus_register_notifier(const struct bus_type *bus,
struct notifier_block *nb);
-extern int bus_unregister_notifier(struct bus_type *bus,
+extern int bus_unregister_notifier(const struct bus_type *bus,
struct notifier_block *nb);
-/* All 4 notifers below get called with the target struct device *
- * as an argument. Note that those functions are likely to be called
- * with the device lock held in the core, so be careful.
+/**
+ * enum bus_notifier_event - Bus Notifier events that have happened
+ * @BUS_NOTIFY_ADD_DEVICE: device is added to this bus
+ * @BUS_NOTIFY_DEL_DEVICE: device is about to be removed from this bus
+ * @BUS_NOTIFY_REMOVED_DEVICE: device is successfully removed from this bus
+ * @BUS_NOTIFY_BIND_DRIVER: a driver is about to be bound to this device on this bus
+ * @BUS_NOTIFY_BOUND_DRIVER: a driver is successfully bound to this device on this bus
+ * @BUS_NOTIFY_UNBIND_DRIVER: a driver is about to be unbound from this device on this bus
+ * @BUS_NOTIFY_UNBOUND_DRIVER: a driver is successfully unbound from this device on this bus
+ * @BUS_NOTIFY_DRIVER_NOT_BOUND: a driver failed to be bound to this device on this bus
+ *
+ * These are the values passed to a bus notifier when a specific event happens.
+ *
+ * Note that bus notifiers are likely to be called with the device lock already
+ * held by the driver core, so be careful in any notifier callback as to what
+ * you do with the device structure.
+ *
+ * All bus notifiers are called with the target struct device * as an argument.
*/
-#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
-#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
-#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
-#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
- bound */
-#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
- unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
- from the device */
-#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */
+enum bus_notifier_event {
+ BUS_NOTIFY_ADD_DEVICE,
+ BUS_NOTIFY_DEL_DEVICE,
+ BUS_NOTIFY_REMOVED_DEVICE,
+ BUS_NOTIFY_BIND_DRIVER,
+ BUS_NOTIFY_BOUND_DRIVER,
+ BUS_NOTIFY_UNBIND_DRIVER,
+ BUS_NOTIFY_UNBOUND_DRIVER,
+ BUS_NOTIFY_DRIVER_NOT_BOUND,
+};
-extern struct kset *bus_get_kset(struct bus_type *bus);
-extern struct klist *bus_get_device_klist(struct bus_type *bus);
+extern struct kset *bus_get_kset(const struct bus_type *bus);
+struct device *bus_get_dev_root(const struct bus_type *bus);
#endif
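A minimal sketch of a bus notifier using the new enum values; the callback and the choice of platform_bus_type are illustrative:

static int my_bus_cb(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_BOUND_DRIVER)
		dev_info(dev, "driver bound\n");
	return NOTIFY_OK;
}

static struct notifier_block my_bus_nb = { .notifier_call = my_bus_cb };

/* bus_register_notifier(&platform_bus_type, &my_bus_nb); */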
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 2114d65b862f..50d0a416a5e7 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -240,7 +240,6 @@ driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
}
#endif
-extern int driver_deferred_probe_timeout;
void driver_deferred_probe_add(struct device *dev);
int driver_deferred_probe_check_state(struct device *dev);
void driver_init(void);
diff --git a/include/linux/dfl.h b/include/linux/dfl.h
index 431636a0dc78..0a7a00a0ee7f 100644
--- a/include/linux/dfl.h
+++ b/include/linux/dfl.h
@@ -27,11 +27,15 @@ enum dfl_id_type {
* @id: id of the dfl device.
* @type: type of DFL FIU of the device. See enum dfl_id_type.
* @feature_id: feature identifier local to its DFL FIU type.
+ * @revision: revision of this dfl device feature.
* @mmio_res: mmio resource of this dfl device.
* @irqs: list of Linux IRQ numbers of this dfl device.
* @num_irqs: number of IRQs supported by this dfl device.
* @cdev: pointer to DFL FPGA container device this dfl device belongs to.
* @id_entry: matched id entry in dfl driver's id table.
+ * @dfh_version: version of DFH for the device
+ * @param_size: size of the parameter block in bytes
+ * @params: pointer to a copy of the device's parameter block
*/
struct dfl_device {
struct device dev;
@@ -44,6 +48,9 @@ struct dfl_device {
unsigned int num_irqs;
struct dfl_fpga_cdev *cdev;
const struct dfl_device_id *id_entry;
+ u8 dfh_version;
+ unsigned int param_size;
+ void *params;
};
/**
@@ -84,4 +91,5 @@ void dfl_driver_unregister(struct dfl_driver *dfl_drv);
module_driver(__dfl_driver, dfl_driver_register, \
dfl_driver_unregister)
+void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *pcount);
#endif /* __LINUX_DFL_H */
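A hedged sketch of dfh_find_param() use from a dfl_driver probe; MY_PARAM_ID and the meaning of the returned count are assumptions:

static int my_dfl_probe(struct dfl_device *dfl_dev)
{
	size_t count;
	void *param = dfh_find_param(dfl_dev, MY_PARAM_ID, &count);

	if (param)
		dev_dbg(&dfl_dev->dev, "parameter block found, count=%zu\n", count);
	return 0;
}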
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index 15d9e15ca830..2056743aaaaa 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2009-2011 Red Hat, Inc.
*
@@ -26,8 +27,8 @@ struct dm_buffer;
* Create a buffered IO cache on a given device
*/
struct dm_bufio_client *
-dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
- unsigned reserved_buffers, unsigned aux_size,
+dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
+ unsigned int reserved_buffers, unsigned int aux_size,
void (*alloc_callback)(struct dm_buffer *),
void (*write_callback)(struct dm_buffer *),
unsigned int flags);
@@ -81,7 +82,7 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
* I/O to finish.
*/
void dm_bufio_prefetch(struct dm_bufio_client *c,
- sector_t block, unsigned n_blocks);
+ sector_t block, unsigned int n_blocks);
/*
* Release a reference obtained with dm_bufio_{read,get,new}. The data
@@ -106,7 +107,7 @@ void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
* write the specified part of the buffer or it may write a larger superset.
*/
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
- unsigned start, unsigned end);
+ unsigned int start, unsigned int end);
/*
* Initiate writing of dirty buffers, without waiting for completion.
@@ -152,9 +153,9 @@ void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t
/*
* Set the minimum number of buffers before cleanup happens.
*/
-void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n);
-unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
+unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 7084503c3405..0b10faedb26a 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -33,7 +34,7 @@ struct dm_dirty_log_type {
struct list_head list;
int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
- unsigned argc, char **argv);
+ unsigned int argc, char **argv);
void (*dtr)(struct dm_dirty_log *log);
/*
@@ -96,7 +97,7 @@ struct dm_dirty_log_type {
* Do not confuse this function with 'in_sync()', one
* tells you if an area is synchronised, the other
* assigns recovery work.
- */
+ */
int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
/*
@@ -116,7 +117,7 @@ struct dm_dirty_log_type {
* Support function for mirror status requests.
*/
int (*status)(struct dm_dirty_log *log, status_type_t status_type,
- char *result, unsigned maxlen);
+ char *result, unsigned int maxlen);
/*
* is_remote_recovering is necessary for cluster mirroring. It provides
@@ -139,7 +140,7 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
struct dm_target *ti,
int (*flush_callback_fn)(struct dm_target *ti),
- unsigned argc, char **argv);
+ unsigned int argc, char **argv);
void dm_dirty_log_destroy(struct dm_dirty_log *log);
#endif /* __KERNEL__ */
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index 8e1c4ab5df04..7595142f3fc5 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
@@ -26,7 +27,7 @@ struct page_list {
struct page *page;
};
-typedef void (*io_notify_fn)(unsigned long error, void *context);
+typedef void (*io_notify_fn)(unsigned long error, void *context);
enum dm_io_mem_type {
DM_IO_PAGE_LIST,/* Page list */
@@ -38,7 +39,7 @@ enum dm_io_mem_type {
struct dm_io_memory {
enum dm_io_mem_type type;
- unsigned offset;
+ unsigned int offset;
union {
struct page_list *pl;
@@ -78,8 +79,8 @@ void dm_io_client_destroy(struct dm_io_client *client);
* Each bit in the optional 'sync_error_bits' bitset indicates whether an
* error occurred doing io to the corresponding region.
*/
-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
- struct dm_io_region *region, unsigned long *sync_error_bits);
+int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+ struct dm_io_region *region, unsigned long *sync_error_bits);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_IO_H */
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index c1707ee5b540..51fb1af0b63e 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001 - 2003 Sistina Software
* Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
@@ -23,11 +24,11 @@
#define DM_KCOPYD_WRITE_SEQ 2
struct dm_kcopyd_throttle {
- unsigned throttle;
- unsigned num_io_jobs;
- unsigned io_period;
- unsigned total_period;
- unsigned last_jiffies;
+ unsigned int throttle;
+ unsigned int num_io_jobs;
+ unsigned int io_period;
+ unsigned int total_period;
+ unsigned int last_jiffies;
};
/*
@@ -60,12 +61,12 @@ void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc);
* read_err is a boolean,
* write_err is a bitset, with 1 bit for each destination region
*/
-typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
+typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
void *context);
void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
/*
* Prepare a callback and submit it via the kcopyd thread.
@@ -80,11 +81,11 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
*/
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
dm_kcopyd_notify_fn fn, void *context);
-void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
+void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_KCOPYD_H */
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index 9e2a7a401df5..3079ed93dd2d 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -12,9 +13,11 @@
#include <linux/dm-dirty-log.h>
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Region hash
- *----------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
struct dm_region_hash;
struct dm_region;
@@ -37,7 +40,7 @@ struct dm_region_hash *dm_region_hash_create(
struct bio_list *bios),
void (*wakeup_workers)(void *context),
void (*wakeup_all_recovery_waiters)(void *context),
- sector_t target_begin, unsigned max_recovery,
+ sector_t target_begin, unsigned int max_recovery,
struct dm_dirty_log *log, uint32_t region_size,
region_t nr_regions);
void dm_region_hash_destroy(struct dm_region_hash *rh);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 6fa8d4e29719..3f31baa3293f 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -356,7 +356,7 @@ struct dma_buf {
*/
const char *name;
- /** @name_lock: Spinlock to protect name acces for read access. */
+ /** @name_lock: Spinlock to protect name access for read access. */
spinlock_t name_lock;
/**
@@ -393,7 +393,7 @@ struct dma_buf {
* anything the userspace API considers write access.
*
* - Drivers may just always add a write fence, since that only
- * causes unecessarily synchronization, but no correctness issues.
+ * causes unnecessary synchronization, but no correctness issues.
*
* - Some drivers only expose a synchronous userspace API with no
* pipelining across drivers. These do not set any fences for their
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index d678afeb8a13..41bf4bdb117a 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -90,7 +90,7 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev->dma_ops)
return dev->dma_ops;
- return get_arch_dma_ops(dev->bus);
+ return get_arch_dma_ops();
}
static inline void set_dma_ops(struct device *dev,
diff --git a/include/linux/dma/amd_xdma.h b/include/linux/dma/amd_xdma.h
new file mode 100644
index 000000000000..ceba69ed7cb4
--- /dev/null
+++ b/include/linux/dma/amd_xdma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _DMAENGINE_AMD_XDMA_H
+#define _DMAENGINE_AMD_XDMA_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num);
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num);
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index);
+
+#endif /* _DMAENGINE_AMD_XDMA_H */
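A brief usage sketch, assuming xdma_get_user_irq() returns a Linux IRQ number (or a negative errno) for the given user IRQ index:

static int my_client_init(struct platform_device *xdma_pdev)
{
	int irq = xdma_get_user_irq(xdma_pdev, 0);	/* user IRQ index 0, illustrative */

	if (irq < 0)
		return irq;
	/* request_irq(irq, ...) would go here before enabling delivery */
	return xdma_enable_user_irq(xdma_pdev, 0);
}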
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
index 7d8062e9c544..d2638d9259dc 100644
--- a/include/linux/dma/edma.h
+++ b/include/linux/dma/edma.h
@@ -18,13 +18,31 @@
struct dw_edma;
struct dw_edma_region {
- phys_addr_t paddr;
- void __iomem *vaddr;
+ u64 paddr;
+ union {
+ void *mem;
+ void __iomem *io;
+ } vaddr;
size_t sz;
};
+/**
+ * struct dw_edma_core_ops - platform-specific eDMA methods
+ * @irq_vector: Get IRQ number of the passed eDMA channel. Note the
+ * method accepts the channel id in the end-to-end
+ * numbering with the eDMA write channels being placed
+ * first in the row.
+ * @pci_address: Get PCIe bus address corresponding to the passed CPU
+ * address. Note there is no need to specify this
+ * callback if the address translation is performed by
+ * the DW PCIe RP/EP controller hosting the DW eDMA device
+ * and DMA_BYPASS isn't set for all the outbound
+ * iATU windows. In that case the controller performs the
+ * translation automatically.
+ */
struct dw_edma_core_ops {
int (*irq_vector)(struct device *dev, unsigned int nr);
+ u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr);
};
enum dw_edma_map_format {
@@ -61,7 +79,6 @@ enum dw_edma_chip_flags {
*/
struct dw_edma_chip {
struct device *dev;
- int id;
int nr_irqs;
const struct dw_edma_core_ops *ops;
u32 flags;
@@ -84,7 +101,7 @@ struct dw_edma_chip {
};
/* Export to the platform drivers */
-#if IS_ENABLED(CONFIG_DW_EDMA)
+#if IS_REACHABLE(CONFIG_DW_EDMA)
int dw_edma_probe(struct dw_edma_chip *chip);
int dw_edma_remove(struct dw_edma_chip *chip);
#else
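A sketch of platform glue filling the extended dw_edma_core_ops; the offset, the ops instance, and my_irq_vector are assumptions:

static u64 my_pci_address(struct device *dev, phys_addr_t cpu_addr)
{
	return cpu_addr - MY_CPU_PCI_OFFSET;	/* hypothetical fixed translation */
}

static const struct dw_edma_core_ops my_edma_ops = {
	.irq_vector  = my_irq_vector,	/* assumed to be defined elsewhere */
	.pci_address = my_pci_address,	/* may be omitted if the RP/EP translates addresses itself */
};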
diff --git a/include/linux/dma/imx-dma.h b/include/linux/dma/imx-dma.h
index f487a4fa103a..cfec5f946e23 100644
--- a/include/linux/dma/imx-dma.h
+++ b/include/linux/dma/imx-dma.h
@@ -40,6 +40,7 @@ enum sdma_peripheral_type {
IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
IMX_DMATYPE_SAI, /* SAI */
IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
+ IMX_DMATYPE_HDMI, /* HDMI Audio */
};
enum imx_dma_prio {
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c923f4e60f24..c3656e590213 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -394,7 +394,7 @@ enum dma_slave_buswidth {
* should be read (RX), if the source is memory this argument is
* ignored.
* @dst_addr: this is the physical address where DMA slave data
- * should be written (TX), if the source is memory this argument
+ * should be written (TX), if the destination is memory this argument
* is ignored.
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
@@ -773,6 +773,7 @@ struct dma_filter {
/**
* struct dma_device - info on the entity supplying DMA services
+ * @ref: reference is taken and put every time a channel is allocated or freed
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
* @channels: the list of struct dma_chan
@@ -789,6 +790,7 @@ struct dma_filter {
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @owner: owner module (automatically set based on the provided dev)
+ * @chan_ida: unique channel ID
* @src_addr_widths: bit mask of src addr widths the device supports
* Width is specified in bytes, e.g. for a device supporting
* a width of 4 the mask should have BIT(4) set.
@@ -802,6 +804,7 @@ struct dma_filter {
* @max_sg_burst: max number of SG list entries executed in a single burst
 * DMA transaction with no software intervention for reinitialization.
* Zero value means unlimited number of entries.
+ * @descriptor_reuse: a submitted transfer can be resubmitted after completion
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
* @device_alloc_chan_resources: allocate resources and return the
@@ -839,7 +842,6 @@ struct dma_filter {
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
- * @descriptor_reuse: a submitted transfer can be resubmitted after completion
 * @device_release: called sometime after dma_async_device_unregister() is
* called and there are no further references to this structure. This
* must be implemented to free resources however many existing drivers
@@ -847,6 +849,7 @@ struct dma_filter {
* @dbg_summary_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra,
* controller specific information.
+ * @dbg_dev_root: the root folder in debugfs for this device
*/
struct dma_device {
struct kref ref;
@@ -855,7 +858,7 @@ struct dma_device {
struct list_head channels;
struct list_head global_node;
struct dma_filter filter;
- dma_cap_mask_t cap_mask;
+ dma_cap_mask_t cap_mask;
enum dma_desc_metadata_mode desc_metadata_modes;
unsigned short max_xor;
unsigned short max_pq;
@@ -924,10 +927,8 @@ struct dma_device {
struct dma_chan *chan, dma_addr_t dst, u64 data,
unsigned long flags);
- void (*device_caps)(struct dma_chan *chan,
- struct dma_slave_caps *caps);
- int (*device_config)(struct dma_chan *chan,
- struct dma_slave_config *config);
+ void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
+ int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
int (*device_pause)(struct dma_chan *chan);
int (*device_resume)(struct dma_chan *chan);
int (*device_terminate_all)(struct dma_chan *chan);
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index d81a51978d01..725d5e6acec0 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -39,6 +39,7 @@ struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 reg_base_addr; /* register base address*/
+ unsigned long reg_size; /* size of register set */
struct dmar_dev_scope *devices;/* target device array */
int devices_cnt; /* target device count */
u16 segment; /* PCI domain */
diff --git a/include/linux/dsa/ksz_common.h b/include/linux/dsa/ksz_common.h
new file mode 100644
index 000000000000..576a99ca698d
--- /dev/null
+++ b/include/linux/dsa/ksz_common.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch tag common header
+ *
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef _NET_DSA_KSZ_COMMON_H_
+#define _NET_DSA_KSZ_COMMON_H_
+
+#include <net/dsa.h>
+
+/* All time stamps from the KSZ consist of 2 bits for seconds and 30 bits for
+ * nanoseconds. This is NOT the same as 32 bits for nanoseconds.
+ */
+#define KSZ_TSTAMP_SEC_MASK GENMASK(31, 30)
+#define KSZ_TSTAMP_NSEC_MASK GENMASK(29, 0)
+
+static inline ktime_t ksz_decode_tstamp(u32 tstamp)
+{
+ u64 ns = FIELD_GET(KSZ_TSTAMP_SEC_MASK, tstamp) * NSEC_PER_SEC +
+ FIELD_GET(KSZ_TSTAMP_NSEC_MASK, tstamp);
+
+ return ns_to_ktime(ns);
+}
+
+struct ksz_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ksz_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*hwtstamp_set_state)(struct dsa_switch *ds, bool on);
+};
+
+struct ksz_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_type;
+ bool update_correction;
+ u32 tstamp;
+};
+
+#define KSZ_SKB_CB(skb) \
+ ((struct ksz_skb_cb *)((skb)->cb))
+
+static inline struct ksz_tagger_data *
+ksz_tagger_data(struct dsa_switch *ds)
+{
+ return ds->tagger_data;
+}
+
+#endif /* _NET_DSA_KSZ_COMMON_H_ */
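A tiny usage sketch of the timestamp helper; the raw register value is illustrative:

u32 raw = 0x80000010;			/* seconds field = 2, nanoseconds = 16 */
ktime_t ts = ksz_decode_tstamp(raw);	/* 2 * NSEC_PER_SEC + 16 */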
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 98598bd1d2fa..04a733f0ba95 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -391,6 +391,7 @@ void efi_native_runtime_setup(void);
#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
#define EFI_DXE_SERVICES_TABLE_GUID EFI_GUID(0x05ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9)
#define EFI_SMBIOS_PROTOCOL_GUID EFI_GUID(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7)
+#define EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID EFI_GUID(0xf4560cf6, 0x40ec, 0x4b4a, 0xa1, 0x92, 0xbf, 0x1d, 0x57, 0xd0, 0xb1, 0x89)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -583,11 +584,15 @@ typedef struct {
#define EFI_INVALID_TABLE_ADDR (~0UL)
+// BIT0 implies that Runtime code includes the forward control flow guard
+// instruction, such as X86 CET-IBT or ARM BTI.
+#define EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD 0x1
+
typedef struct {
u32 version;
u32 num_entries;
u32 desc_size;
- u32 reserved;
+ u32 flags;
efi_memory_desc_t entry[0];
} efi_memory_attributes_table_t;
@@ -721,8 +726,7 @@ static inline void efi_esrt_init(void) { }
extern int efi_config_parse_tables(const efi_config_table_t *config_tables,
int count,
const efi_config_table_type_t *arch_tables);
-extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
- int min_major_version);
+extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr);
extern void efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
unsigned long fw_vendor);
extern u64 efi_get_iobase (void);
@@ -732,6 +736,7 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
+extern int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
@@ -751,7 +756,7 @@ extern unsigned long efi_mem_attr_table;
* argument in the page tables referred to by the
* first argument.
*/
-typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool);
extern int efi_memattr_init(void);
extern int efi_memattr_apply_permissions(struct mm_struct *mm,
@@ -1040,7 +1045,6 @@ struct efivar_operations {
struct efivars {
struct kset *kset;
- struct kobject *kobject;
const struct efivar_operations *ops;
};
@@ -1054,12 +1058,16 @@ struct efivars {
#define EFI_VAR_NAME_LEN 1024
int efivars_register(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *kobject);
+ const struct efivar_operations *ops);
int efivars_unregister(struct efivars *efivars);
-struct kobject *efivars_kobject(void);
-int efivar_supports_writes(void);
+#ifdef CONFIG_EFI
+bool efivar_is_available(void);
+#else
+static inline bool efivar_is_available(void) { return false; }
+#endif
+
+bool efivar_supports_writes(void);
int efivar_lock(void);
int efivar_trylock(void);
@@ -1319,4 +1327,14 @@ struct linux_efi_initrd {
/* Header of a populated EFI secret area */
#define EFI_SECRET_TABLE_HEADER_GUID EFI_GUID(0x1e74f542, 0x71dd, 0x4d66, 0x96, 0x3e, 0xef, 0x42, 0x87, 0xff, 0x17, 0x3b)
+bool xen_efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table);
+
+static inline
+bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
+{
+ if (!IS_ENABLED(CONFIG_XEN_EFI))
+ return true;
+ return xen_efi_config_table_is_usable(guid, table);
+}
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
index 635a95caf29f..20e738f4eae8 100644
--- a/include/linux/error-injection.h
+++ b/include/linux/error-injection.h
@@ -3,6 +3,7 @@
#define _LINUX_ERROR_INJECTION_H
#include <linux/compiler.h>
+#include <linux/errno.h>
#include <asm-generic/error-injection.h>
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
@@ -19,7 +20,7 @@ static inline bool within_error_injection_list(unsigned long addr)
static inline int get_injectable_error_type(unsigned long addr)
{
- return EI_ETYPE_NONE;
+ return -EOPNOTSUPP;
}
#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index a541f0c4f146..224645f17c33 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -508,6 +508,20 @@ static inline void eth_addr_inc(u8 *addr)
}
/**
+ * eth_addr_add() - Add (or subtract) an offset to/from the given MAC address.
+ *
+ * @addr: Pointer to a six-byte array containing the Ethernet address to adjust.
+ * @offset: Offset to add (may be negative to subtract).
+ */
+static inline void eth_addr_add(u8 *addr, long offset)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u += offset;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
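A typical consumer of eth_addr_add() derives per-port or per-function MAC addresses from a single base address. A minimal sketch, with made-up "base" and "port_index" inputs:

#include <linux/etherdevice.h>

/* Derive a per-port MAC address by offsetting a base address (illustration). */
static void example_set_port_addr(u8 *port_addr, const u8 *base,
                                  unsigned int port_index)
{
        ether_addr_copy(port_addr, base);
        eth_addr_add(port_addr, port_index + 1);
}
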
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 9e0a76fc7de9..2792185dda22 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -15,6 +15,7 @@
#include <linux/bitmap.h>
#include <linux/compat.h>
+#include <linux/if_ether.h>
#include <linux/netlink.h>
#include <uapi/linux/ethtool.h>
@@ -72,12 +73,14 @@ enum {
* @rx_buf_len: Current length of buffers on the rx ring.
* @tcp_data_split: Scatter packet headers and data to separate buffers
* @tx_push: The flag of tx push mode
+ * @rx_push: The flag of rx push mode
* @cqe_size: Size of TX/RX completion queue event
*/
struct kernel_ethtool_ringparam {
u32 rx_buf_len;
u8 tcp_data_split;
u8 tx_push;
+ u8 rx_push;
u32 cqe_size;
};
@@ -86,11 +89,13 @@ struct kernel_ethtool_ringparam {
* @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
* @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
* @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
+ * @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
*/
enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
ETHTOOL_RING_USE_TX_PUSH = BIT(2),
+ ETHTOOL_RING_USE_RX_PUSH = BIT(3),
};
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
@@ -106,11 +111,6 @@ enum ethtool_supported_ring_param {
struct net_device;
struct netlink_ext_ack;
-/* Some generic methods drivers may use in their ethtool_ops */
-u32 ethtool_op_get_link(struct net_device *dev);
-int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
-
-
/* Link extended state and substate. */
struct ethtool_link_ext_state_info {
enum ethtool_link_ext_state link_ext_state;
@@ -217,6 +217,9 @@ __ethtool_get_link_ksettings(struct net_device *dev,
struct kernel_ethtool_coalesce {
u8 use_cqe_mode_tx;
u8 use_cqe_mode_rx;
+ u32 tx_aggr_max_bytes;
+ u32 tx_aggr_max_frames;
+ u32 tx_aggr_time_usecs;
};
/**
@@ -260,7 +263,10 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22)
#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23)
-#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(23, 0)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES BIT(24)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES BIT(25)
+#define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(26, 0)
#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
@@ -288,6 +294,10 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
#define ETHTOOL_COALESCE_USE_CQE \
(ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX)
+#define ETHTOOL_COALESCE_TX_AGGR \
+ (ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES | \
+ ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES | \
+ ETHTOOL_COALESCE_TX_AGGR_TIME_USECS)
#define ETHTOOL_STAT_NOT_SET (~0ULL)
@@ -301,48 +311,59 @@ static inline void ethtool_stats_init(u64 *stats, unsigned int n)
* via a more targeted API.
*/
struct ethtool_eth_mac_stats {
- u64 FramesTransmittedOK;
- u64 SingleCollisionFrames;
- u64 MultipleCollisionFrames;
- u64 FramesReceivedOK;
- u64 FrameCheckSequenceErrors;
- u64 AlignmentErrors;
- u64 OctetsTransmittedOK;
- u64 FramesWithDeferredXmissions;
- u64 LateCollisions;
- u64 FramesAbortedDueToXSColls;
- u64 FramesLostDueToIntMACXmitError;
- u64 CarrierSenseErrors;
- u64 OctetsReceivedOK;
- u64 FramesLostDueToIntMACRcvError;
- u64 MulticastFramesXmittedOK;
- u64 BroadcastFramesXmittedOK;
- u64 FramesWithExcessiveDeferral;
- u64 MulticastFramesReceivedOK;
- u64 BroadcastFramesReceivedOK;
- u64 InRangeLengthErrors;
- u64 OutOfRangeLengthField;
- u64 FrameTooLongErrors;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 FramesTransmittedOK;
+ u64 SingleCollisionFrames;
+ u64 MultipleCollisionFrames;
+ u64 FramesReceivedOK;
+ u64 FrameCheckSequenceErrors;
+ u64 AlignmentErrors;
+ u64 OctetsTransmittedOK;
+ u64 FramesWithDeferredXmissions;
+ u64 LateCollisions;
+ u64 FramesAbortedDueToXSColls;
+ u64 FramesLostDueToIntMACXmitError;
+ u64 CarrierSenseErrors;
+ u64 OctetsReceivedOK;
+ u64 FramesLostDueToIntMACRcvError;
+ u64 MulticastFramesXmittedOK;
+ u64 BroadcastFramesXmittedOK;
+ u64 FramesWithExcessiveDeferral;
+ u64 MulticastFramesReceivedOK;
+ u64 BroadcastFramesReceivedOK;
+ u64 InRangeLengthErrors;
+ u64 OutOfRangeLengthField;
+ u64 FrameTooLongErrors;
+ );
};
/* Basic IEEE 802.3 PHY statistics (30.3.2.1.*), not otherwise exposed
* via a more targeted API.
*/
struct ethtool_eth_phy_stats {
- u64 SymbolErrorDuringCarrier;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 SymbolErrorDuringCarrier;
+ );
};
/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
* via a more targeted API.
*/
struct ethtool_eth_ctrl_stats {
- u64 MACControlFramesTransmitted;
- u64 MACControlFramesReceived;
- u64 UnsupportedOpcodesReceived;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 MACControlFramesTransmitted;
+ u64 MACControlFramesReceived;
+ u64 UnsupportedOpcodesReceived;
+ );
};
/**
* struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
* @tx_pause_frames: transmitted pause frame count. Reported to user space
* as %ETHTOOL_A_PAUSE_STAT_TX_FRAMES.
*
@@ -356,8 +377,11 @@ struct ethtool_eth_ctrl_stats {
* from the standard.
*/
struct ethtool_pause_stats {
- u64 tx_pause_frames;
- u64 rx_pause_frames;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 tx_pause_frames;
+ u64 rx_pause_frames;
+ );
};
#define ETHTOOL_MAX_LANES 8
@@ -407,6 +431,8 @@ struct ethtool_rmon_hist_range {
/**
* struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
* @undersize_pkts: Equivalent to `etherStatsUndersizePkts` from the RFC.
* @oversize_pkts: Equivalent to `etherStatsOversizePkts` from the RFC.
* @fragments: Equivalent to `etherStatsFragments` from the RFC.
@@ -422,13 +448,16 @@ struct ethtool_rmon_hist_range {
* ranges is left to the driver.
*/
struct ethtool_rmon_stats {
- u64 undersize_pkts;
- u64 oversize_pkts;
- u64 fragments;
- u64 jabbers;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 undersize_pkts;
+ u64 oversize_pkts;
+ u64 fragments;
+ u64 jabbers;
- u64 hist[ETHTOOL_RMON_HIST_MAX];
- u64 hist_tx[ETHTOOL_RMON_HIST_MAX];
+ u64 hist[ETHTOOL_RMON_HIST_MAX];
+ u64 hist_tx[ETHTOOL_RMON_HIST_MAX];
+ );
};
#define ETH_MODULE_EEPROM_PAGE_LEN 128
@@ -468,6 +497,98 @@ struct ethtool_module_power_mode_params {
};
/**
+ * struct ethtool_mm_state - 802.3 MAC merge layer state
+ * @verify_time:
+ * wait time between verification attempts in ms (according to clause
+ * 30.14.1.6 aMACMergeVerifyTime)
+ * @max_verify_time:
+ * maximum accepted value for the @verify_time variable in set requests
+ * @verify_status:
+ * state of the verification state machine of the MM layer (according to
+ * clause 30.14.1.2 aMACMergeStatusVerify)
+ * @tx_enabled:
+ * set if the MM layer is administratively enabled in the TX direction
+ * (according to clause 30.14.1.3 aMACMergeEnableTx)
+ * @tx_active:
+ * set if the MM layer is enabled in the TX direction, which makes FP
+ * possible (according to 30.14.1.5 aMACMergeStatusTx). This should be
+ * true if MM is enabled and the verification status is either verified
+ * or disabled.
+ * @pmac_enabled:
+ * set if the preemptible MAC is powered on and is able to receive
+ * preemptible packets and respond to verification frames.
+ * @verify_enabled:
+ * set if the Verify function of the MM layer (which sends SMD-V
+ * verification requests) is administratively enabled (regardless of
+ * whether it is currently in the ETHTOOL_MM_VERIFY_STATUS_DISABLED state
+ * or not), according to clause 30.14.1.4 aMACMergeVerifyDisableTx (but
+ * using positive rather than negative logic). The device should always
+ * respond to received SMD-V requests as long as @pmac_enabled is set.
+ * @tx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that the link partner
+ * supports receiving, expressed in octets. Compared to the definition
+ * from clause 30.14.1.7 aMACMergeAddFragSize which is expressed in the
+ * range 0 to 3 (requiring a translation to the size in octets according
+ * to the formula 64 * (1 + addFragSize) - 4), a value in a continuous and
+ * unbounded range can be specified here.
+ * @rx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that this device
+ * supports receiving, expressed in octets.
+ */
+struct ethtool_mm_state {
+ u32 verify_time;
+ u32 max_verify_time;
+ enum ethtool_mm_verify_status verify_status;
+ bool tx_enabled;
+ bool tx_active;
+ bool pmac_enabled;
+ bool verify_enabled;
+ u32 tx_min_frag_size;
+ u32 rx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_cfg - 802.3 MAC merge layer configuration
+ * @verify_time: see struct ethtool_mm_state
+ * @verify_enabled: see struct ethtool_mm_state
+ * @tx_enabled: see struct ethtool_mm_state
+ * @pmac_enabled: see struct ethtool_mm_state
+ * @tx_min_frag_size: see struct ethtool_mm_state
+ */
+struct ethtool_mm_cfg {
+ u32 verify_time;
+ bool verify_enabled;
+ bool tx_enabled;
+ bool pmac_enabled;
+ u32 tx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_stats - 802.3 MAC merge layer statistics
+ * @MACMergeFrameAssErrorCount:
+ * received MAC frames with reassembly errors
+ * @MACMergeFrameSmdErrorCount:
+ * received MAC frames/fragments rejected due to unknown or incorrect SMD
+ * @MACMergeFrameAssOkCount:
+ * received MAC frames that were successfully reassembled and passed up
+ * @MACMergeFragCountRx:
+ * number of additional correct SMD-C mPackets received due to preemption
+ * @MACMergeFragCountTx:
+ * number of additional mPackets sent due to preemption
+ * @MACMergeHoldCount:
+ * number of times the MM layer entered the HOLD state, which blocks
+ * transmission of preemptible traffic
+ */
+struct ethtool_mm_stats {
+ u64 MACMergeFrameAssErrorCount;
+ u64 MACMergeFrameSmdErrorCount;
+ u64 MACMergeFrameAssOkCount;
+ u64 MACMergeFragCountRx;
+ u64 MACMergeFragCountTx;
+ u64 MACMergeHoldCount;
+};
+
+/**
* struct ethtool_ops - optional netdev operations
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
@@ -639,6 +760,9 @@ struct ethtool_module_power_mode_params {
* plugged-in.
* @set_module_power_mode: Set the power mode policy for the plug-in module
* used by the network device.
+ * @get_mm: Query the 802.3 MAC Merge layer state.
+ * @set_mm: Set the 802.3 MAC Merge layer parameters.
+ * @get_mm_stats: Query the 802.3 MAC Merge layer statistics.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -777,6 +901,10 @@ struct ethtool_ops {
int (*set_module_power_mode)(struct net_device *dev,
const struct ethtool_module_power_mode_params *params,
struct netlink_ext_ack *extack);
+ int (*get_mm)(struct net_device *dev, struct ethtool_mm_state *state);
+ int (*set_mm)(struct net_device *dev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+ void (*get_mm_stats)(struct net_device *dev, struct ethtool_mm_stats *stats);
};
int ethtool_check_ops(const struct ethtool_ops *ops);
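Drivers that support frame preemption wire the new callbacks into their ethtool_ops and fill in the MM structures above. A hedged sketch under hypothetical foo_* names; the actual hardware programming is elided:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical driver state; all foo_* names are illustrative only. */
struct foo_priv {
        bool fp_tx_enabled;
        bool fp_verify_enabled;
        u32 add_frag_size;      /* 0..3, as programmed into hardware */
};

static int foo_get_mm(struct net_device *dev, struct ethtool_mm_state *state)
{
        struct foo_priv *priv = netdev_priv(dev);

        state->tx_enabled = priv->fp_tx_enabled;
        state->verify_enabled = priv->fp_verify_enabled;
        state->tx_min_frag_size =
                ethtool_mm_frag_size_add_to_min(priv->add_frag_size);
        state->max_verify_time = 128;   /* ms, device dependent */

        return 0;
}

static int foo_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
                      struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(dev);
        u32 add_frag_size;
        int err;

        err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
                                              &add_frag_size, extack);
        if (err)
                return err;

        priv->fp_tx_enabled = cfg->tx_enabled;
        priv->fp_verify_enabled = cfg->verify_enabled;
        priv->add_frag_size = add_frag_size;

        return 0;       /* a real driver would now program the hardware */
}

static void foo_get_mm_stats(struct net_device *dev,
                             struct ethtool_mm_stats *stats)
{
        /* fill stats->MACMergeFrameAssOkCount etc. from hardware here */
}

static const struct ethtool_ops foo_ethtool_ops = {
        .get_mm         = foo_get_mm,
        .set_mm         = foo_set_mm,
        .get_mm_stats   = foo_get_mm_stats,
};
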
@@ -802,12 +930,17 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
struct phy_device;
struct phy_tdr_config;
+struct phy_plca_cfg;
+struct phy_plca_status;
/**
* struct ethtool_phy_ops - Optional PHY device options
* @get_sset_count: Get number of strings that @get_strings will write.
* @get_strings: Return a set of strings that describe the requested objects
* @get_stats: Return extended statistics about the PHY device.
+ * @get_plca_cfg: Return PLCA configuration.
+ * @set_plca_cfg: Set PLCA configuration.
+ * @get_plca_status: Get PLCA status.
* @start_cable_test: Start a cable test
* @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
*
@@ -819,6 +952,13 @@ struct ethtool_phy_ops {
int (*get_strings)(struct phy_device *dev, u8 *data);
int (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
int (*start_cable_test)(struct phy_device *phydev,
struct netlink_ext_ack *extack);
int (*start_cable_test_tdr)(struct phy_device *phydev,
@@ -851,6 +991,51 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
*/
int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+/* Some generic methods drivers may use in their ethtool_ops */
+u32 ethtool_op_get_link(struct net_device *dev);
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+
+/**
+ * ethtool_mm_frag_size_add_to_min - Translate (standard) additional fragment
+ * size expressed as multiplier into (absolute) minimum fragment size
+ * value expressed in octets
+ * @val_add: Value of addFragSize multiplier
+ */
+static inline u32 ethtool_mm_frag_size_add_to_min(u32 val_add)
+{
+ return (ETH_ZLEN + ETH_FCS_LEN) * (1 + val_add) - ETH_FCS_LEN;
+}
+
+/**
+ * ethtool_mm_frag_size_min_to_add - Translate (absolute) minimum fragment size
+ * expressed in octets into (standard) additional fragment size expressed
+ * as multiplier
+ * @val_min: Minimum fragment size expressed in octets
+ * @val_add: Pointer where the standard addFragSize value is to be returned
+ * @extack: Netlink extended ack
+ *
+ * Translate a minimum fragment size expressed in octets to one of the values
+ * 0, 1, 2 or 3 by reversing the 802.3 formula 64 * (1 + addFragSize) - 4.
+ * To be called by drivers which cannot program the minimum fragment size over
+ * a continuous range. Returns an error for any other fragment length value.
+ */
+static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
+ struct netlink_ext_ack *extack)
+{
+ u32 add_frag_size;
+
+ for (add_frag_size = 0; add_frag_size < 4; add_frag_size++) {
+ if (ethtool_mm_frag_size_add_to_min(add_frag_size) == val_min) {
+ *val_add = add_frag_size;
+ return 0;
+ }
+ }
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "minFragSize required to be one of 60, 124, 188 or 252");
+ return -EINVAL;
+}
+
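Since ETH_ZLEN is 60 and ETH_FCS_LEN is 4, the two helpers above map addFragSize 0, 1, 2 and 3 to minimum fragment sizes of 60, 124, 188 and 252 octets, matching the error message in ethtool_mm_frag_size_min_to_add(). A small sketch that prints the mapping, purely for illustration:

#include <linux/ethtool.h>
#include <linux/printk.h>

/* Dump the addFragSize <-> minimum fragment size mapping (illustration). */
static void example_dump_frag_sizes(void)
{
        u32 add;

        for (add = 0; add < 4; add++)
                pr_info("addFragSize %u -> min fragment %u octets\n",
                        add, ethtool_mm_frag_size_add_to_min(add));
}
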
/**
* ethtool_sprintf - Write formatted string to ethtool string data
* @data: Pointer to start of string to update
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index aba348d58ff6..17003b385756 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -29,6 +29,17 @@ int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV);
int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV);
int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last,
u32 step);
+void ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats);
+void ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats);
+void ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
+void ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats);
+
#else
static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd)
{
@@ -70,5 +81,36 @@ static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first,
{
return -EOPNOTSUPP;
}
+
+static inline void
+ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
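The aggregation helpers above re-invoke the driver's own statistics callback with @src set to the eMAC and pMAC sources and sum the results, so a driver whose hardware keeps separate counters can hand the aggregate case back to the core. A hedged sketch of a get_pause_stats() callback, assuming the ETHTOOL_MAC_STATS_SRC_* enum values; the hardware reads are elided:

#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>

/* Sketch only: dispatch on the requested statistics source. */
static void example_get_pause_stats(struct net_device *dev,
                                    struct ethtool_pause_stats *stats)
{
        switch (stats->src) {
        case ETHTOOL_MAC_STATS_SRC_EMAC:
                /* read the express MAC pause counters from hardware here */
                break;
        case ETHTOOL_MAC_STATS_SRC_PMAC:
                /* read the preemptible MAC pause counters from hardware here */
                break;
        case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
                /* let the core query eMAC + pMAC and sum them for us */
                ethtool_aggregate_pause_stats(dev, stats);
                break;
        }
}
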
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 9f4d4bcbf251..601700fedc91 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -131,7 +131,7 @@ struct fid {
u32 parent_block;
u32 parent_generation;
} udf;
- __u32 raw[0];
+ DECLARE_FLEX_ARRAY(__u32, raw);
};
};
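The union member change above is needed because a bare flexible array member is not valid as the sole field of a struct or union arm; DECLARE_FLEX_ARRAY() wraps it so the standard C99 "raw[]" form can still be used there. A minimal sketch of the same pattern in an unrelated, made-up structure:

#include <linux/stddef.h>
#include <linux/types.h>

/* Illustration of DECLARE_FLEX_ARRAY() inside a union (made-up struct). */
struct example_msg {
        u16 len;
        union {
                struct {
                        u32 a;
                        u32 b;
                } fixed;
                DECLARE_FLEX_ARRAY(u32, raw);   /* "u32 raw[];" alone is invalid here */
        };
};
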
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index ee0d75d9a302..1701f25117ea 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -315,7 +315,7 @@ struct f2fs_inode {
__u8 i_log_cluster_size; /* log of cluster size */
__le16 i_compress_flag; /* compress flag */
/* 0 bit: chksum flag
- * [10,15] bits: compress level
+ * [8,15] bits: compress level
*/
__le32 i_extra_end[0]; /* for attribute size calculation */
} __packed;
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 73eb1f85ea8e..d8d20514ea05 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -215,7 +215,6 @@ struct fb_deferred_io {
struct mutex lock; /* mutex that protects the pageref list */
struct list_head pagereflist; /* list of pagerefs for touched pages */
/* callback */
- void (*first_io)(struct fb_info *info);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
#endif
@@ -423,8 +422,6 @@ struct fb_tile_ops {
*/
#define FBINFO_MISC_ALWAYS_SETPAR 0x40000
-/* where the fb is a firmware driver, and can be replaced with a proper one */
-#define FBINFO_MISC_FIRMWARE 0x80000
/*
* Host and GPU endianness differ.
*/
@@ -499,30 +496,10 @@ struct fb_info {
void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
- /* we need the PCI or similar aperture base/size not
- smem_start/size as smem_start may just be an object
- allocated inside the aperture so may not actually overlap */
- struct apertures_struct {
- unsigned int count;
- struct aperture {
- resource_size_t base;
- resource_size_t size;
- } ranges[0];
- } *apertures;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
};
-static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
- struct apertures_struct *a;
-
- a = kzalloc(struct_size(a, ranges, max_num), GFP_KERNEL);
- if (!a)
- return NULL;
- a->count = max_num;
- return a;
-}
-
#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT
/* This will go away
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ccc4a4a58c72..1727898f1641 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -14,6 +14,7 @@
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/capability.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
diff --git a/include/linux/find.h b/include/linux/find.h
index ccaf61a0f5fd..4647864a5ffd 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -22,6 +22,9 @@ unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long
unsigned long size, unsigned long n);
unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n);
+unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ const unsigned long *addr3, unsigned long size,
+ unsigned long n);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
@@ -255,6 +258,36 @@ unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned lon
return __find_nth_andnot_bit(addr1, addr2, size, n);
}
+/**
+ * find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
+ * excluding those set in 3rd region
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @addr3: The 3rd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the wanted set bit, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static __always_inline
+unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & (~*addr3) & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_andnot_bit(addr1, addr2, addr3, size, n);
+}
+
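A hedged usage sketch of the new helper: pick the n'th ID that is set in two bitmaps but excluded by a third (all names below are made up for illustration):

#include <linux/bitmap.h>
#include <linux/find.h>
#include <linux/limits.h>

/* Illustration: n'th id present in "active" and "allowed" but not "reserved". */
static unsigned int example_pick_id(const unsigned long *active,
                                    const unsigned long *allowed,
                                    const unsigned long *reserved,
                                    unsigned int nbits, unsigned int n)
{
        unsigned long id;

        id = find_nth_and_andnot_bit(active, allowed, reserved, nbits, n);
        if (id >= nbits)
                return UINT_MAX;        /* fewer than n + 1 candidates */

        return id;
}
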
#ifndef find_first_and_bit
/**
* find_first_and_bit - find the first set bit in both memory regions
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 980019053e54..1716c01c4e54 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -208,10 +208,7 @@ struct fw_device {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_device *fw_device(struct device *dev)
-{
- return container_of(dev, struct fw_device, device);
-}
+#define fw_device(dev) container_of_const(dev, struct fw_device, device)
static inline int fw_device_is_shutdown(struct fw_device *device)
{
@@ -229,10 +226,7 @@ struct fw_unit {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_unit *fw_unit(struct device *dev)
-{
- return container_of(dev, struct fw_unit, device);
-}
+#define fw_unit(dev) container_of_const(dev, struct fw_unit, device)
static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
@@ -246,10 +240,7 @@ static inline void fw_unit_put(struct fw_unit *unit)
put_device(&unit->device);
}
-static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
-{
- return fw_device(unit->device.parent);
-}
+#define fw_parent_device(unit) fw_device(unit->device.parent)
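Replacing the inline accessors with container_of_const() keeps the const qualifier of the argument: a const struct device * now yields a const struct fw_device *, while non-const input still yields a non-const pointer. A small illustrative sketch:

#include <linux/firewire.h>

/* Illustration: the macro preserves the caller's const-ness. */
static int example_read_generation(const struct device *dev)
{
        const struct fw_device *fwdev = fw_device(dev); /* stays const */

        return fwdev->generation;
}
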
struct ieee1394_device_id;
@@ -278,9 +269,8 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
* Otherwise there is a danger of recursion of inbound and outbound
* transactions from and to the local node.
*
- * The callback is responsible that either fw_send_response() or kfree()
- * is called on the @request, except for FCP registers for which the core
- * takes care of that.
+ * The callback is responsible for calling fw_send_response() on the @request, except for FCP
+ * registers, for which the core takes care of that.
*/
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,
diff --git a/include/linux/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index f8335644a01a..1e449a5d7f5c 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
#include <linux/cpumask.h>
+#include <dt-bindings/firmware/qcom,scm.h>
+
#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
@@ -51,10 +53,6 @@ enum qcom_scm_ice_cipher {
QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4,
};
-#define QCOM_SCM_VMID_HLOS 0x3
-#define QCOM_SCM_VMID_MSS_MSA 0xF
-#define QCOM_SCM_VMID_WLAN 0x18
-#define QCOM_SCM_VMID_WLAN_CE 0x19
#define QCOM_SCM_PERM_READ 0x4
#define QCOM_SCM_PERM_WRITE 0x2
#define QCOM_SCM_PERM_EXEC 0x1
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index b09f443d3ab9..0e4c70987e6a 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -79,6 +79,10 @@
#define EVENT_ERROR_PSM_ERR1 (0x28108000U)
#define EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+/* ZynqMP SD tap delay tuning */
+#define SD_ITAPDLY 0xFF180314
+#define SD_OTAPDLYSEL 0xFF180318
+
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
PM_ACKNOWLEDGE_CB = 31,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2acc46fb5f97..c85916e9f7db 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -166,6 +166,8 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File supports DIRECT IO */
#define FMODE_CAN_ODIRECT ((__force fmode_t)0x400000)
+#define FMODE_NOREUSE ((__force fmode_t)0x800000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
@@ -2728,6 +2730,12 @@ ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);
/* fs/splice.c */
+ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
+ssize_t direct_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
index 2d9203314865..df25fffdc0ae 100644
--- a/include/linux/fsl/enetc_mdio.h
+++ b/include/linux/fsl/enetc_mdio.h
@@ -37,16 +37,27 @@ struct enetc_mdio_priv {
#if IS_REACHABLE(CONFIG_FSL_ENETC_MDIO)
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum);
+int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value);
+int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad, int regnum);
+int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad, int regnum,
+ u16 value);
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs);
#else
-static inline int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+static inline int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id,
+ int regnum)
{ return -EINVAL; }
-static inline int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
+static inline int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id,
+ int regnum, u16 value)
+{ return -EINVAL; }
+static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum, u16 value)
{ return -EINVAL; }
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{ return ERR_PTR(-EINVAL); }
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index 01acebe37fab..b301bf7199d3 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -149,6 +149,7 @@ struct ptp_qoriq {
struct device *dev;
bool extts_fifo_support;
bool fiper3_support;
+ bool etsec;
int irq;
int phc_index;
u32 tclk_period; /* nanoseconds */
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 89b9bdfca925..5700451b300f 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -18,7 +18,7 @@ struct fwnode_operations;
struct device;
/*
- * fwnode link flags
+ * fwnode flags
*
* LINKS_ADDED: The fwnode has already be parsed to add fwnode links.
* NOT_DEVICE: The fwnode will never be populated as a struct device.
@@ -36,6 +36,7 @@ struct device;
#define FWNODE_FLAG_INITIALIZED BIT(2)
#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
#define FWNODE_FLAG_BEST_EFFORT BIT(4)
+#define FWNODE_FLAG_VISITED BIT(5)
struct fwnode_handle {
struct fwnode_handle *secondary;
@@ -46,11 +47,19 @@ struct fwnode_handle {
u8 flags;
};
+/*
+ * fwnode link flags
+ *
+ * CYCLE: The fwnode link is part of a cycle. Don't defer probe.
+ */
+#define FWLINK_FLAG_CYCLE BIT(0)
+
struct fwnode_link {
struct fwnode_handle *supplier;
struct list_head s_hook;
struct fwnode_handle *consumer;
struct list_head c_hook;
+ u8 flags;
};
/**
@@ -198,7 +207,6 @@ static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
}
-extern u32 fw_devlink_get_flags(void);
extern bool fw_devlink_is_strict(void);
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
void fwnode_links_purge(struct fwnode_handle *fwnode);
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index d88c46ca82e1..5088637fe5c2 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -31,7 +31,7 @@ typedef unsigned int __bitwise gfp_t;
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
#define ___GFP_ZERO 0x100u
-#define ___GFP_ATOMIC 0x200u
+/* 0x200u unused */
#define ___GFP_DIRECT_RECLAIM 0x400u
#define ___GFP_KSWAPD_RECLAIM 0x800u
#define ___GFP_WRITE 0x1000u
@@ -116,11 +116,8 @@ typedef unsigned int __bitwise gfp_t;
*
* %__GFP_HIGH indicates that the caller is high-priority and that granting
* the request is necessary before the system can make forward progress.
- * For example, creating an IO context to clean pages.
- *
- * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
- * high priority. Users are typically interrupt handlers. This may be
- * used in conjunction with %__GFP_HIGH
+ * For example, creating an IO context to clean pages, or requests
+ * issued from atomic context.
*
* %__GFP_MEMALLOC allows access to all memory. This should only be used when
* the caller guarantees the allocation will allow more memory to be freed
@@ -135,7 +132,6 @@ typedef unsigned int __bitwise gfp_t;
* %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
* This takes precedence over the %__GFP_MEMALLOC flag if both are set.
*/
-#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
@@ -329,7 +325,7 @@ typedef unsigned int __bitwise gfp_t;
* version does not attempt reclaim/compaction at all and is by default used
* in page fault path, while the non-light is used by khugepaged.
*/
-#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
+#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 346f60bbab30..85beb236c925 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -81,11 +81,6 @@ static inline int gpio_to_irq(unsigned int gpio)
return __gpio_to_irq(gpio);
}
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
@@ -197,14 +192,6 @@ static inline int gpio_export(unsigned gpio, bool direction_may_change)
return -EINVAL;
}
-static inline int gpio_export_link(struct device *dev, const char *name,
- unsigned gpio)
-{
- /* GPIO can never have been exported */
- WARN_ON(1);
- return -EINVAL;
-}
-
static inline void gpio_unexport(unsigned gpio)
{
/* GPIO can never have been exported */
@@ -218,13 +205,6 @@ static inline int gpio_to_irq(unsigned gpio)
return -EINVAL;
}
-static inline int irq_to_gpio(unsigned irq)
-{
- /* irq can never have been returned from gpio_to_irq() */
- WARN_ON(1);
- return -EINVAL;
-}
-
static inline int devm_gpio_request(struct device *dev, unsigned gpio,
const char *label)
{
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 45da8f137fe5..59cb20cfac3d 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -581,54 +581,6 @@ struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
flags, label);
}
-#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO)
-struct device_node;
-
-struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
-
-#else /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */
-
-struct device_node;
-
-static inline
-struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-#endif /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */
-
-#ifdef CONFIG_GPIOLIB
-struct device_node;
-
-struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- const struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
-
-#else /* CONFIG_GPIOLIB */
-
-struct device_node;
-
-static inline
-struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- const struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-#endif /* CONFIG_GPIOLIB */
-
struct acpi_gpio_params {
unsigned int crs_entry_index;
unsigned int line_index;
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 44783fc16125..ccd8a512d854 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -336,7 +336,7 @@ struct gpio_irq_chip {
* @set_multiple: assigns output values for multiple signals defined by "mask"
* @set_config: optional hook for all kinds of settings. Uses the same
* packed config format as generic pinconf.
- * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
+ * @to_irq: optional hook supporting non-static gpiod_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra
@@ -504,13 +504,6 @@ struct gpio_chip {
*/
/**
- * @of_node:
- *
- * Pointer to a device tree node representing this GPIO controller.
- */
- struct device_node *of_node;
-
- /**
* @of_gpio_n_cells:
*
* Number of cells used to form the GPIO specifier.
@@ -525,18 +518,6 @@ struct gpio_chip {
*/
int (*of_xlate)(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags);
-
- /**
- * @of_gpio_ranges_fallback:
- *
- * Optional hook for the case that no gpio-ranges property is defined
- * within the device tree node "np" (usually DT before introduction
- * of gpio-ranges). So this callback is helpful to provide the
- * necessary backward compatibility for the pin ranges.
- */
- int (*of_gpio_ranges_fallback)(struct gpio_chip *gc,
- struct device_node *np);
-
#endif /* CONFIG_OF_GPIO */
};
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index ac631159403a..13b1e65fbdcc 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -132,6 +132,7 @@
#define HID_USAGE_SENSOR_PROP_FRIENDLY_NAME 0x200301
#define HID_USAGE_SENSOR_PROP_SERIAL_NUM 0x200307
#define HID_USAGE_SENSOR_PROP_MANUFACTURER 0x200305
+#define HID_USAGE_SENSOR_PROP_MODEL 0x200306
#define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8677ae38599e..eaf8ab177303 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <uapi/linux/hid.h>
+#include <linux/hid_bpf.h>
/*
* We parse each description item into this structure. Short items data
@@ -312,6 +313,7 @@ struct hid_item {
#define HID_DG_LATENCYMODE 0x000d0060
#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065
+#define HID_BAT_CHARGING 0x00850044
#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
@@ -595,7 +597,7 @@ struct hid_device { /* device report descriptor */
struct device dev; /* device */
struct hid_driver *driver;
- struct hid_ll_driver *ll_driver;
+ const struct hid_ll_driver *ll_driver;
struct mutex ll_open_lock;
unsigned int ll_open_count;
@@ -611,6 +613,7 @@ struct hid_device { /* device report descriptor */
__s32 battery_max;
__s32 battery_report_type;
__s32 battery_report_id;
+ __s32 battery_charge_status;
enum hid_battery_status battery_status;
bool battery_avoid_query;
ktime_t battery_ratelimit_time;
@@ -619,6 +622,7 @@ struct hid_device { /* device report descriptor */
unsigned long status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
+ unsigned initial_quirks; /* Initial set of quirks supplied when creating device */
bool io_started; /* If IO has started */
struct list_head inputs; /* The list of inputs */
@@ -651,6 +655,10 @@ struct hid_device { /* device report descriptor */
wait_queue_head_t debug_wait;
unsigned int id; /* system unique id */
+
+#ifdef CONFIG_BPF
+ struct hid_bpf bpf; /* hid-bpf data */
+#endif /* CONFIG_BPF */
};
#define to_hid_device(pdev) \
@@ -853,21 +861,7 @@ struct hid_ll_driver {
bool (*may_wakeup)(struct hid_device *hdev);
};
-extern struct hid_ll_driver i2c_hid_ll_driver;
-extern struct hid_ll_driver hidp_hid_driver;
-extern struct hid_ll_driver uhid_hid_driver;
-extern struct hid_ll_driver usb_hid_driver;
-
-static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
- struct hid_ll_driver *driver)
-{
- return hdev->ll_driver == driver;
-}
-
-static inline bool hid_is_usb(struct hid_device *hdev)
-{
- return hid_is_using_ll_driver(hdev, &usb_hid_driver);
-}
+extern bool hid_is_usb(const struct hid_device *hdev);
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
@@ -882,8 +876,6 @@ static inline bool hid_is_usb(struct hid_device *hdev)
/* HID core API */
-extern int hid_debug;
-
extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
@@ -1191,11 +1183,7 @@ int hid_pidff_init(struct hid_device *hid);
#define hid_pidff_init NULL
#endif
-#define dbg_hid(fmt, ...) \
-do { \
- if (hid_debug) \
- printk(KERN_DEBUG "%s: " fmt, __FILE__, ##__VA_ARGS__); \
-} while (0)
+#define dbg_hid(fmt, ...) pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__)
#define hid_err(hid, fmt, ...) \
dev_err(&(hid)->dev, fmt, ##__VA_ARGS__)
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
new file mode 100644
index 000000000000..e9afb61e6ee0
--- /dev/null
+++ b/include/linux/hid_bpf.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __HID_BPF_H
+#define __HID_BPF_H
+
+#include <linux/bpf.h>
+#include <linux/spinlock.h>
+#include <uapi/linux/hid.h>
+
+struct hid_device;
+
+/*
+ * The following is the user facing HID BPF API.
+ *
+ * Extra care should be taken when editing this part, as
+ * it might break existing out of the tree bpf programs.
+ */
+
+/**
+ * struct hid_bpf_ctx - User accessible data for all HID programs
+ *
+ * ``data`` is not directly accessible from the context. We need to issue
+ * a call to ``hid_bpf_get_data()`` in order to get a pointer to that field.
+ *
+ * All of these fields are currently read-only.
+ *
+ * @index: program index in the jump table. No special meaning (a smaller index
+ * doesn't mean the program will be executed before another program with
+ * a bigger index).
+ * @hid: the ``struct hid_device`` representing the device itself
+ * @report_type: used for ``hid_bpf_device_event()``
+ * @allocated_size: Allocated size of data.
+ *
+ * This is how much memory is available and can be requested
+ * by the HID program.
+ * Note that for ``HID_BPF_RDESC_FIXUP``, that memory is set to
+ * ``4096`` (4 KB).
+ * @size: Valid data in the data field.
+ *
+ * Programs can get the available valid size in data by fetching this field.
+ * Programs can also change this value by returning a positive number in the
+ * program.
+ * To discard the event, return a negative error code.
+ *
+ * ``size`` must always be less than or equal to ``allocated_size`` (it is enforced
+ * once all BPF programs have been run).
+ * @retval: Return value of the previous program.
+ */
+struct hid_bpf_ctx {
+ __u32 index;
+ const struct hid_device *hid;
+ __u32 allocated_size;
+ enum hid_report_type report_type;
+ union {
+ __s32 retval;
+ __s32 size;
+ };
+};
+
+/**
+ * enum hid_bpf_attach_flags - flags used when attaching a HID-BPF program
+ *
+ * @HID_BPF_FLAG_NONE: no specific flag is used, the kernel chooses where to
+ * insert the program
+ * @HID_BPF_FLAG_INSERT_HEAD: insert the given program before any other program
+ * currently attached to the device. This doesn't
+ * guarantee that this program will always be first
+ * @HID_BPF_FLAG_MAX: sentinel value, not to be used by the callers
+ */
+enum hid_bpf_attach_flags {
+ HID_BPF_FLAG_NONE = 0,
+ HID_BPF_FLAG_INSERT_HEAD = _BITUL(0),
+ HID_BPF_FLAG_MAX,
+};
+
+/* Following functions are tracepoints that BPF programs can attach to */
+int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
+int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
+
+/* Following functions are kfunc that we export to BPF programs */
+/* available everywhere in HID-BPF */
+__u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t __sz);
+
+/* only available in syscall */
+int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags);
+int hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
+ enum hid_report_type rtype, enum hid_class_request reqtype);
+struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id);
+void hid_bpf_release_context(struct hid_bpf_ctx *ctx);
+
+/*
+ * Below is HID internal
+ */
+
+/* internal function to call eBPF programs, not to be used by anybody */
+int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx);
+
+#define HID_BPF_MAX_PROGS_PER_DEV 64
+#define HID_BPF_FLAG_MASK (((HID_BPF_FLAG_MAX - 1) << 1) - 1)
+
+/* types of HID programs to attach to */
+enum hid_bpf_prog_type {
+ HID_BPF_PROG_TYPE_UNDEF = -1,
+ HID_BPF_PROG_TYPE_DEVICE_EVENT, /* an event is emitted from the device */
+ HID_BPF_PROG_TYPE_RDESC_FIXUP,
+ HID_BPF_PROG_TYPE_MAX,
+};
+
+struct hid_report_enum;
+
+struct hid_bpf_ops {
+ struct hid_report *(*hid_get_report)(struct hid_report_enum *report_enum, const u8 *data);
+ int (*hid_hw_raw_request)(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype);
+ struct module *owner;
+ struct bus_type *bus_type;
+};
+
+extern struct hid_bpf_ops *hid_bpf_ops;
+
+struct hid_bpf_prog_list {
+ u16 prog_idx[HID_BPF_MAX_PROGS_PER_DEV];
+ u8 prog_cnt;
+};
+
+/* stored in each device */
+struct hid_bpf {
+ u8 *device_data; /* allocated when a bpf program of type
+ * SEC(f.../hid_bpf_device_event) has been attached
+ * to this HID device
+ */
+ u32 allocated_data;
+
+ struct hid_bpf_prog_list __rcu *progs[HID_BPF_PROG_TYPE_MAX]; /* attached BPF progs */
+ bool destroyed; /* prevents the assignment of any progs */
+
+ spinlock_t progs_lock; /* protects RCU update of progs */
+};
+
+/* specific HID-BPF link when a program is attached to a device */
+struct hid_bpf_link {
+ struct bpf_link link;
+ int hid_table_index;
+};
+
+#ifdef CONFIG_HID_BPF
+u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
+ u32 *size, int interrupt);
+int hid_bpf_connect_device(struct hid_device *hdev);
+void hid_bpf_disconnect_device(struct hid_device *hdev);
+void hid_bpf_destroy_device(struct hid_device *hid);
+void hid_bpf_device_init(struct hid_device *hid);
+u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size);
+#else /* CONFIG_HID_BPF */
+static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
+ u8 *data, u32 *size, int interrupt) { return data; }
+static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
+static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
+static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
+static inline void hid_bpf_device_init(struct hid_device *hid) {}
+static inline u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
+{
+ return kmemdup(rdesc, *size, GFP_KERNEL);
+}
+
+#endif /* CONFIG_HID_BPF */
+
+#endif /* __HID_BPF_H */
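A minimal HID-BPF program against the tracepoints declared above, loosely modeled on the in-tree samples; the report offset and the fixup itself are hypothetical:

// SPDX-License-Identifier: GPL-2.0
/* Minimal HID-BPF sketch; layout and fixup below are made up. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
                              unsigned int offset, const size_t __sz) __ksym;

SEC("fmod_ret/hid_bpf_device_event")
int BPF_PROG(example_fix_event, struct hid_bpf_ctx *hctx)
{
        /* Ask for the first 3 bytes of the report. */
        __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);

        if (!data)
                return 0;       /* EPERM check failed, leave the event alone */

        /* Example fixup: invert the sign of the relative value at byte 1. */
        data[1] = -data[1];

        return 0;
}

char _license[] SEC("license") = "GPL";
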
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 44242268f53b..b06254e76d99 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -86,8 +86,8 @@ static inline void kmap_flush_unused(void);
* virtual address of the direct mapping. Only real highmem pages are
* temporarily mapped.
*
- * While it is significantly faster than kmap() for the higmem case it
- * comes with restrictions about the pointer validity.
+ * While kmap_local_page() is significantly faster than kmap() for the highmem
+ * case it comes with restrictions about the pointer validity.
*
* On HIGHMEM enabled systems mapping a highmem page has the side effect of
* disabling migration in order to keep the virtual address stable across
@@ -119,9 +119,8 @@ static inline void *kmap_local_page(struct page *page);
* virtual address of the direct mapping. Only real highmem pages are
* temporarily mapped.
*
- * While it is significantly faster than kmap() for the higmem case it
- * comes with restrictions about the pointer validity. Only use when really
- * necessary.
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity.
*
* On HIGHMEM enabled systems mapping a highmem page has the side effect of
* disabling migration in order to keep the virtual address stable across
@@ -208,31 +207,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
}
#endif
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#ifndef vma_alloc_zeroed_movable_folio
/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
+ * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
+ * @vma: The VMA the page is to be allocated for.
+ * @vaddr: The virtual address the page will be inserted into.
*
- * Returns: The allocated and zeroed HIGHMEM page
+ * This function will allocate a page suitable for inserting into this
+ * VMA at this virtual address. It may be allocated from highmem or
+ * the movable zone. An architecture may provide its own implementation.
*
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- *
- * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
- * implementation.
+ * Return: A folio containing one allocated and zeroed page or NULL if
+ * we are out of memory.
*/
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+static inline
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
unsigned long vaddr)
{
- struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
+ struct folio *folio;
- if (page)
- clear_user_highpage(page, vaddr);
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+ if (folio)
+ clear_user_highpage(&folio->page, vaddr);
- return page;
+ return folio;
}
#endif
@@ -416,6 +414,36 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
}
/**
+ * memcpy_from_file_folio - Copy some bytes from a file folio.
+ * @to: The destination buffer.
+ * @folio: The folio to copy from.
+ * @pos: The position in the file.
+ * @len: The maximum number of bytes to copy.
+ *
+ * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
+ * if the folio comes from HIGHMEM, and by the size of the folio.
+ *
+ * Return: The number of bytes copied from the folio.
+ */
+static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
+ loff_t pos, size_t len)
+{
+ size_t offset = offset_in_folio(folio, pos);
+ char *from = kmap_local_folio(folio, offset);
+
+ if (folio_test_highmem(folio)) {
+ offset = offset_in_page(offset);
+ len = min_t(size_t, len, PAGE_SIZE - offset);
+ } else
+ len = min(len, folio_size(folio) - offset);
+
+ memcpy(to, from, len);
+ kunmap_local(from);
+
+ return len;
+}
+
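A hedged usage sketch: memcpy_from_file_folio() may copy less than requested (at most one page at a time for highmem folios), so callers typically loop. The wrapper below is illustrative only and requires @pos to lie within the folio:

#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Copy up to @len bytes from @folio starting at file position @pos. */
static size_t example_copy_from_folio(char *buf, struct folio *folio,
                                      loff_t pos, size_t len)
{
        size_t copied = 0;

        /* Never read past the end of this folio. */
        len = min(len, folio_size(folio) - offset_in_folio(folio, pos));

        while (copied < len) {
                size_t n = memcpy_from_file_folio(buf + copied, folio,
                                                  pos + copied, len - copied);

                if (!n)
                        break;
                copied += n;
        }

        return copied;
}
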
+/**
* folio_zero_segments() - Zero two byte ranges in a folio.
* @folio: The folio to write to.
* @start1: The first byte to zero.
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index be3aedaa96dc..a7d54d4d41fd 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -122,7 +122,6 @@ enum qp_state {
};
enum qm_hw_ver {
- QM_HW_UNKNOWN = -1,
QM_HW_V1 = 0x20,
QM_HW_V2 = 0x21,
QM_HW_V3 = 0x30,
@@ -272,6 +271,20 @@ struct hisi_qm_poll_data {
u16 *qp_finish_id;
};
+/**
+ * struct qm_err_isolate - error isolation state of a qm device
+ * @isolate_lock: protects the device error log
+ * @err_threshold: user-configured error threshold which triggers isolation
+ * @is_isolate: device isolation state
+ * @qm_hw_errs: list of recorded device hardware errors
+ */
+struct qm_err_isolate {
+ struct mutex isolate_lock;
+ u32 err_threshold;
+ bool is_isolate;
+ struct list_head qm_hw_errs;
+};
+
struct hisi_qm {
enum qm_hw_ver ver;
enum qm_fun_type fun_type;
@@ -309,7 +322,8 @@ struct hisi_qm {
const struct hisi_qm_err_ini *err_ini;
struct hisi_qm_err_info err_info;
struct hisi_qm_err_status err_status;
- unsigned long misc_ctl; /* driver removing and reset sched */
+ /* driver removing and reset sched */
+ unsigned long misc_ctl;
/* Device capability bit */
unsigned long caps;
@@ -332,7 +346,6 @@ struct hisi_qm {
const char *algs;
bool use_sva;
- bool is_frozen;
resource_size_t phys_base;
resource_size_t db_phys_base;
@@ -341,6 +354,7 @@ struct hisi_qm {
struct qm_shaper_factor *factor;
u32 mb_qos;
u32 type_rate;
+ struct qm_err_isolate isolate_data;
};
struct hisi_qp_status {
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index dc55d9d3b94f..9a9de4b97a25 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/dma-direction.h>
+#include <linux/dma-fence.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -221,7 +222,9 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
u32 syncpt_id);
-struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
+ bool timeout);
+void host1x_fence_cancel(struct dma_fence *fence);
/*
* host1x channel
@@ -288,8 +291,9 @@ struct host1x_job {
u32 syncpt_incrs;
u32 syncpt_end;
- /* Completion waiter ref */
- void *waiter;
+ /* Completion fence for job tracking */
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
/* Maximum time to wait for this job */
unsigned int timeout;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a1341fdcf666..70bd867eba94 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -187,7 +187,7 @@ static inline int split_huge_page(struct page *page)
{
return split_huge_page_to_list(page, NULL);
}
-void deferred_split_huge_page(struct page *page);
+void deferred_split_folio(struct folio *folio);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct folio *folio);
@@ -293,15 +293,6 @@ static inline bool thp_migration_supported(void)
return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
-static inline struct list_head *page_deferred_list(struct page *page)
-{
- /*
- * See organization of tail pages of compound page in
- * "struct page" definition.
- */
- return &page[2].deferred_list;
-}
-
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -349,7 +340,7 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}
-static inline void deferred_split_huge_page(struct page *page) {}
+static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 9ab9d3105d5c..7c977d234aba 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H
+#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
@@ -170,11 +171,11 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-int isolate_hugetlb(struct page *page, struct list_head *list);
-int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison);
+bool isolate_hugetlb(struct folio *folio, struct list_head *list);
+int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
-void putback_active_hugepage(struct page *page);
+void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
@@ -193,6 +194,43 @@ extern struct list_head huge_boot_pages;
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz);
+/*
+ * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
+ * Returns the pte_t* if found, or NULL if the address is not mapped.
+ *
+ * IMPORTANT: this function should normally not be called directly; it is
+ * only a common interface used to implement arch-specific walkers.
+ * Please use hugetlb_walk() instead, because that will attempt to verify
+ * the locking for you.
+ *
+ * Since this function will walk all the pgtable pages (including not only
+ * high-level pgtable pages, but also PUD entries that can be unshared
+ * concurrently for VM_SHARED), the caller of this function is
+ * responsible for its thread safety. One can follow this rule:
+ *
+ * (1) For private mappings: pmd unsharing is not possible, so holding the
+ * mmap_lock for either read or write is sufficient. Most callers
+ * already hold the mmap_lock, so normally, no special action is
+ * required.
+ *
+ * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
+ * pgtable page can go away from under us! It can be done by a pmd
+ * unshare with a follow up munmap() on the other process), then we
+ * need either:
+ *
+ * (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
+ * won't happen upon the range (it also makes sure the pte_t we
+ * read is the right and stable one), or,
+ *
+ * (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
+ * sure even if unshare happened the racy unmap() will wait until
+ * i_mmap_rwsem is released.
+ *
+ * Option (2.1) is the safest, as it guarantees pte stability from the pmd
+ * sharing point of view until the vma lock is released. Option (2.2) does
+ * not protect against a concurrent pmd unshare, but it makes sure the
+ * pgtable page is safe to access.
+ */
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
@@ -211,7 +249,7 @@ void hugetlb_vma_lock_release(struct kref *kref);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
-unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot,
unsigned long cp_flags);
@@ -375,12 +413,12 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return NULL;
}
-static inline int isolate_hugetlb(struct page *page, struct list_head *list)
+static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
- return -EBUSY;
+ return false;
}
-static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison)
+static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
return 0;
}
@@ -391,7 +429,7 @@ static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
return 0;
}
-static inline void putback_active_hugepage(struct page *page)
+static inline void folio_putback_active_hugetlb(struct folio *folio)
{
}
@@ -400,7 +438,7 @@ static inline void move_hugetlb_state(struct folio *old_folio,
{
}
-static inline unsigned long hugetlb_change_protection(
+static inline long hugetlb_change_protection(
struct vm_area_struct *vma, unsigned long address,
unsigned long end, pgprot_t newprot,
unsigned long cp_flags)
@@ -679,16 +717,16 @@ struct huge_bootmem_page {
};
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
-struct page *alloc_huge_page(struct vm_area_struct *vma,
+struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask);
-struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address);
-int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
- unsigned long address, struct page *page);
+ unsigned long address, struct folio *folio);
/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
@@ -843,9 +881,9 @@ extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
#ifdef CONFIG_MEMORY_FAILURE
-extern void hugetlb_clear_page_hwpoison(struct page *hpage);
+extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
#else
-static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
+static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
}
#endif
@@ -998,21 +1036,21 @@ static inline int isolate_or_dissolve_huge_page(struct page *page,
return -ENOMEM;
}
-static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
+static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr,
int avoid_reserve)
{
return NULL;
}
-static inline struct page *
-alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+static inline struct folio *
+alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask)
{
return NULL;
}
-static inline struct page *alloc_huge_page_vma(struct hstate *h,
+static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address)
{
@@ -1213,4 +1251,35 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif
+static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
+}
+
+/*
+ * Safe version of huge_pte_offset() to check the locks. See comments
+ * above huge_pte_offset().
+ */
+static inline pte_t *
+hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
+{
+#if defined(CONFIG_HUGETLB_PAGE) && \
+ defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ /*
+ * If pmd sharing is possible, locking is needed to safely walk the
+ * hugetlb pgtables. More information can be found in the comment
+ * above huge_pte_offset() in the same file.
+ *
+ * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
+ */
+ if (__vma_shareable_lock(vma))
+ WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
+ !lockdep_is_held(
+ &vma->vm_file->f_mapping->i_mmap_rwsem));
+#endif
+ return huge_pte_offset(vma->vm_mm, addr, sz);
+}
+
#endif /* _LINUX_HUGETLB_H */
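
The new hugetlb_walk() helper only layers lockdep assertions on top of
huge_pte_offset(); the caller still has to follow the locking rules spelled
out above. A minimal sketch (not part of the patch; the function name is
illustrative) of a walk done under rule (2.1), holding the hugetlb vma lock:

#include <linux/hugetlb.h>
#include <linux/mm.h>

/* Illustrative only: test whether one hugetlb address is populated. */
static bool example_hugetlb_addr_mapped(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct hstate *h = hstate_vma(vma);
	bool mapped = false;
	pte_t *ptep;

	hugetlb_vma_lock_read(vma);	/* rule (2.1): blocks pmd unsharing */
	ptep = hugetlb_walk(vma, addr & huge_page_mask(h), huge_page_size(h));
	if (ptep)
		mapped = !huge_pte_none(huge_ptep_get(ptep));
	hugetlb_vma_unlock_read(vma);

	return mapped;
}
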
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index f706626a8063..3d82d91f49ac 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -141,10 +141,10 @@ extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page);
+ struct folio *folio);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page);
+ struct folio *folio);
extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
struct folio *folio);
extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
@@ -230,14 +230,14 @@ static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
}
static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
}
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 14325f93c6b2..c1b62384b6ee 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -436,6 +436,10 @@ struct hwmon_chip_info {
/* hwmon_device_register() is deprecated */
struct device *hwmon_device_register(struct device *dev);
+/*
+ * hwmon_device_register_with_groups() and
+ * devm_hwmon_device_register_with_groups() are deprecated.
+ */
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index cd5cb9f6fae0..bfbc37ce223b 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1309,10 +1309,7 @@ struct hv_device {
};
-static inline struct hv_device *device_to_hv_device(struct device *d)
-{
- return container_of(d, struct hv_device, device);
-}
+#define device_to_hv_device(d) container_of_const(d, struct hv_device, device)
static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index d84e0e99f084..500404d85141 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -965,15 +965,33 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
#endif /* I2C */
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned i2c_client device */
-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+ return i2c_find_device_by_fwnode(of_fwnode_handle(node));
+}
/* must call put_device() when done with returned i2c_adapter device */
-struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_find_adapter_by_fwnode(of_fwnode_handle(node));
+}
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
-struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_get_adapter_by_fwnode(of_fwnode_handle(node));
+}
const struct of_device_id
*i2c_of_match_device(const struct of_device_id *matches,
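
The new fwnode-based lookups generalize the OF-only helpers above; as the
comments note, the returned reference must be dropped when done. A hedged
sketch (not part of the patch; the function name is illustrative) of using
the adapter variant:

#include <linux/errno.h>
#include <linux/fwnode.h>
#include <linux/i2c.h>

/* Illustrative only: resolve and use an adapter referenced by a fwnode. */
static int example_use_adapter(struct fwnode_handle *adapter_fwnode)
{
	struct i2c_adapter *adap;

	adap = i2c_get_adapter_by_fwnode(adapter_fwnode);
	if (!adap)
		return -EPROBE_DEFER;

	/* ... issue transfers through adap ... */

	i2c_put_adapter(adap);	/* required, per the comment above */
	return 0;
}
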
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 1c997abe868c..90fa83464f00 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -18,17 +18,18 @@
/**
* enum i3c_error_code - I3C error codes
*
+ * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
+ * related
+ * @I3C_ERROR_M0: M0 error
+ * @I3C_ERROR_M1: M1 error
+ * @I3C_ERROR_M2: M2 error
+ *
* These are the standard error codes as defined by the I3C specification.
* When -EIO is returned by the i3c_device_do_priv_xfers() or
* i3c_device_send_hdr_cmds() one can check the error code in
* &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
* what went wrong.
*
- * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
- * related
- * @I3C_ERROR_M0: M0 error
- * @I3C_ERROR_M1: M1 error
- * @I3C_ERROR_M2: M2 error
*/
enum i3c_error_code {
I3C_ERROR_UNKNOWN = 0,
@@ -186,7 +187,14 @@ static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
}
struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
-struct i3c_device *dev_to_i3cdev(struct device *dev);
+
+/**
+ * dev_to_i3cdev() - Returns the I3C device containing @dev
+ * @__dev: device object
+ *
+ * Return: a pointer to an I3C device object.
+ */
+#define dev_to_i3cdev(__dev) container_of_const(__dev, struct i3c_device, dev)
const struct i3c_device_id *
i3c_device_match_id(struct i3c_device *i3cdev,
@@ -296,7 +304,7 @@ int i3c_device_do_priv_xfers(struct i3c_device *dev,
int i3c_device_do_setdasa(struct i3c_device *dev);
-void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info);
+void i3c_device_get_info(const struct i3c_device *dev, struct i3c_device_info *info);
struct i3c_ibi_payload {
unsigned int len;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 80d6308dea06..2463bdd2a382 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1356,6 +1356,7 @@ struct ieee80211_mgmt {
} __packed wnm_timing_msr;
} u;
} __packed action;
+ DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
} u;
} __packed __aligned(2);
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 0303eb84d596..140f61ec0f5f 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -44,6 +44,13 @@
#define IEEE802154_SHORT_ADDR_LEN 2
#define IEEE802154_PAN_ID_LEN 2
+/* Duration in superframe order */
+#define IEEE802154_MAX_SCAN_DURATION 14
+#define IEEE802154_ACTIVE_SCAN_DURATION 15
+/* Superframe duration in slots */
+#define IEEE802154_SUPERFRAME_PERIOD 16
+/* Various periods expressed in symbols */
+#define IEEE802154_SLOT_PERIOD 60
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 78890143f079..b19d3284551f 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -15,6 +15,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/sockptr.h>
#include <uapi/linux/igmp.h>
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 8e0afaaa3f75..81413cd3a3e7 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -381,6 +381,11 @@ s64 iio_get_time_ns(const struct iio_dev *indio_dev);
#define INDIO_MAX_RAW_ELEMENTS 4
+struct iio_val_int_plus_micro {
+ int integer;
+ int micro;
+};
+
struct iio_trigger; /* forward declaration */
/**
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 172b113a9864..86b57757c7b1 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -21,7 +21,8 @@ extern int ima_file_check(struct file *file, int mask);
extern void ima_post_create_tmpfile(struct mnt_idmap *idmap,
struct inode *inode);
extern void ima_file_free(struct file *file);
-extern int ima_file_mmap(struct file *file, unsigned long prot);
+extern int ima_file_mmap(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags);
extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot);
extern int ima_load_data(enum kernel_load_data_id id, bool contents);
extern int ima_post_load_data(char *buf, loff_t size,
@@ -76,7 +77,8 @@ static inline void ima_file_free(struct file *file)
return;
}
-static inline int ima_file_mmap(struct file *file, unsigned long prot)
+static inline int ima_file_mmap(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
{
return 0;
}
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
deleted file mode 100644
index f9a0d44f6fdb..000000000000
--- a/include/linux/intel-svm.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2015 Intel Corporation.
- *
- * Authors: David Woodhouse <David.Woodhouse@intel.com>
- */
-
-#ifndef __INTEL_SVM_H__
-#define __INTEL_SVM_H__
-
-/* Page Request Queue depth */
-#define PRQ_ORDER 4
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
-#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
-
-#endif /* __INTEL_SVM_H__ */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index fca43a4bd96b..0f8123504e5e 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -13,6 +13,7 @@
struct address_space;
struct fiemap_extent_info;
struct inode;
+struct iomap_iter;
struct iomap_dio;
struct iomap_writepage_ctx;
struct iov_iter;
@@ -84,7 +85,7 @@ struct vm_fault;
*/
#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
-struct iomap_page_ops;
+struct iomap_folio_ops;
struct iomap {
u64 addr; /* disk offset of mapping, bytes */
@@ -96,7 +97,7 @@ struct iomap {
struct dax_device *dax_dev; /* dax_dev for dax operations */
void *inline_data;
void *private; /* filesystem private */
- const struct iomap_page_ops *page_ops;
+ const struct iomap_folio_ops *folio_ops;
u64 validity_cookie; /* used with .iomap_valid() */
};
@@ -124,19 +125,20 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap)
}
/*
- * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
- * and page_done will be called for each page written to. This only applies to
- * buffered writes as unbuffered writes will not typically have pages
+ * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio
+ * and put_folio will be called for each folio written to. This only applies
+ * to buffered writes as unbuffered writes will not typically have folios
* associated with them.
*
- * When page_prepare succeeds, page_done will always be called to do any
- * cleanup work necessary. In that page_done call, @page will be NULL if the
- * associated page could not be obtained.
+ * When get_folio succeeds, put_folio will always be called to do any
+ * cleanup work necessary. put_folio is responsible for unlocking and putting
+ * @folio.
*/
-struct iomap_page_ops {
- int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
- void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
- struct page *page);
+struct iomap_folio_ops {
+ struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
+ unsigned len);
+ void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
+ struct folio *folio);
/*
* Check that the cached iomap still maps correctly to the filesystem's
@@ -259,6 +261,7 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 46e1347bfa22..6595454d4f48 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -120,7 +120,6 @@ static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
enum iommu_cap {
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */
- IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
DMA protection and we should too */
@@ -228,6 +227,9 @@ struct iommu_iotlb_gather {
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
* group and attached to the groups domain
+ * @set_platform_dma_ops: Return control back to the platform DMA ops. This op
+ * is here to support old IOMMU drivers; new drivers should use
+ * default domains and the common IOMMU DMA ops.
* @device_group: find iommu group for a particular device
* @get_resv_regions: Request list of reserved regions for a device
* @of_xlate: add OF master IDs to iommu grouping
@@ -256,6 +258,7 @@ struct iommu_ops {
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
void (*probe_finalize)(struct device *dev);
+ void (*set_platform_dma_ops)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
/* Request/Free a list of reserved regions for a device */
@@ -295,7 +298,6 @@ struct iommu_ops {
* * EBUSY - device is attached to a domain and cannot be changed
* * ENODEV - device specific errors, not able to be attached
* * <others> - treated as ENODEV by the caller. Use is discouraged
- * @detach_dev: detach an iommu domain from a device
* @set_dev_pasid: set an iommu domain to a pasid of device
* @map: map a physically contiguous memory region to an iommu domain
* @map_pages: map a physically contiguous set of pages of the same size to
@@ -316,7 +318,6 @@ struct iommu_ops {
*/
struct iommu_domain_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
- void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
ioasid_t pasid);
@@ -401,6 +402,7 @@ struct iommu_fault_param {
* @iommu_dev: IOMMU device this device is linked to
* @priv: IOMMU Driver private data
* @max_pasids: number of PASIDs this device can consume
+ * @attach_deferred: the dma domain attachment is deferred
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
@@ -413,6 +415,7 @@ struct dev_iommu {
struct iommu_device *iommu_dev;
void *priv;
u32 max_pasids;
+ u32 attach_deferred:1;
};
int iommu_device_register(struct iommu_device *iommu,
@@ -455,6 +458,7 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
+extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
@@ -467,19 +471,15 @@ extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
-extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot);
-extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot);
+ struct scatterlist *sg, unsigned int nents,
+ int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
@@ -773,14 +773,7 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
}
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- return -ENODEV;
-}
-
-static inline int iommu_map_atomic(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t paddr,
- size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
return -ENODEV;
}
@@ -800,14 +793,7 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
-{
- return -ENODEV;
-}
-
-static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
+ unsigned int nents, int prot, gfp_t gfp)
{
return -ENODEV;
}
@@ -1118,7 +1104,8 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
unsigned long iova, struct sg_table *sgt, int prot)
{
- return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
+ GFP_KERNEL);
}
#ifdef CONFIG_IOMMU_DEBUGFS
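
With this change callers pass GFP flags explicitly instead of choosing
between iommu_map() and the removed iommu_map_atomic(). A minimal sketch of
the updated calling convention (not part of the patch; the wrapper is
illustrative):

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Illustrative only: map one 4 KiB page with the new gfp argument. */
static int example_map_page(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr)
{
	/* Former iommu_map_atomic() callers now pass GFP_ATOMIC here. */
	return iommu_map(domain, iova, paddr, SZ_4K,
			 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
}
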
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 650d45629647..c0b5b3ac34f1 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -57,7 +57,9 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
unsigned long iova, unsigned long length);
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
void *data, size_t len, unsigned int flags);
-int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
+int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
+int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx);
+int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx);
#else /* !CONFIG_IOMMUFD */
static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
{
@@ -89,8 +91,12 @@ static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long
return -EOPNOTSUPP;
}
-static inline int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx,
- u32 *out_ioas_id)
+static inline int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 3d9c6750af62..d11c25f5030a 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -35,4 +35,25 @@ static inline unsigned int ip_transport_len(const struct sk_buff *skb)
{
return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb);
}
+
+static inline unsigned int iph_totlen(const struct sk_buff *skb, const struct iphdr *iph)
+{
+ u32 len = ntohs(iph->tot_len);
+
+ return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
+ len : skb->len - skb_network_offset(skb);
+}
+
+static inline unsigned int skb_ip_totlen(const struct sk_buff *skb)
+{
+ return iph_totlen(skb, ip_hdr(skb));
+}
+
+/* IPv4 datagram length is stored into 16bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFFU
+
+static inline void iph_set_totlen(struct iphdr *iph, unsigned int len)
+{
+ iph->tot_len = len <= IP_MAX_MTU ? htons(len) : 0;
+}
#endif /* _LINUX_IP_H */
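
skb_ip_totlen()/iph_totlen() return the real datagram length even when
tot_len is 0 (BIG TCP GSO packets), and iph_set_totlen() stores 0 when the
length no longer fits in the 16-bit field. A short sketch of the intended
read/modify pattern (the function name is illustrative):

#include <linux/ip.h>
#include <linux/skbuff.h>

/* Illustrative only: read and write back the IPv4 total length. */
static void example_totlen(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	unsigned int len = skb_ip_totlen(skb);	/* GSO-safe tot_len */

	/* ... adjust headers, possibly changing len ... */

	iph_set_totlen(iph, len);	/* writes 0 if len > IP_MAX_MTU */
}
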
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index d320d15d4fba..51c254b7fec2 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -194,8 +194,10 @@ enum {
/* Irq domain implements MSIs */
IRQ_DOMAIN_FLAG_MSI = (1 << 4),
- /* Irq domain implements MSI remapping */
- IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
+ /*
+ * Irq domain implements isolated MSI, see msi_device_has_isolated_msi()
+ */
+ IRQ_DOMAIN_FLAG_ISOLATED_MSI = (1 << 5),
/* Irq domain doesn't translate anything */
IRQ_DOMAIN_FLAG_NO_MAP = (1 << 6),
@@ -278,7 +280,6 @@ struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
void *host_data);
extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
-extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host);
extern struct irq_domain *irq_get_default_host(void);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
@@ -561,13 +562,6 @@ static inline bool irq_domain_is_msi(struct irq_domain *domain)
return domain->flags & IRQ_DOMAIN_FLAG_MSI;
}
-static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
-{
- return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
-}
-
-extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
-
static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_MSI_PARENT;
@@ -613,17 +607,6 @@ static inline bool irq_domain_is_msi(struct irq_domain *domain)
return false;
}
-static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
-{
- return false;
-}
-
-static inline bool
-irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
-{
- return false;
-}
-
static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
{
return false;
@@ -643,10 +626,6 @@ static inline struct irq_domain *irq_find_matching_fwnode(
{
return NULL;
}
-static inline bool irq_domain_check_msi_remap(void)
-{
- return false;
-}
#endif /* !CONFIG_IRQ_DOMAIN */
#endif /* _LINUX_IRQDOMAIN_H */
diff --git a/include/linux/ism.h b/include/linux/ism.h
new file mode 100644
index 000000000000..ea2bcdae7401
--- /dev/null
+++ b/include/linux/ism.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internal Shared Memory
+ *
+ * Definitions for the ISM module
+ *
+ * Copyright IBM Corp. 2022
+ */
+#ifndef _ISM_H
+#define _ISM_H
+
+#include <linux/workqueue.h>
+
+struct ism_dmb {
+ u64 dmb_tok;
+ u64 rgid;
+ u32 dmb_len;
+ u32 sba_idx;
+ u32 vlan_valid;
+ u32 vlan_id;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+/* Unless we gain unexpected popularity, this limit should hold for a while */
+#define MAX_CLIENTS 8
+#define ISM_NR_DMBS 1920
+
+struct ism_dev {
+ spinlock_t lock; /* protects the ism device */
+ struct list_head list;
+ struct pci_dev *pdev;
+
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+ u8 *sba_client_arr; /* entries are indices into 'clients' array */
+ void *priv[MAX_CLIENTS];
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+
+ struct device dev;
+ u64 local_gid;
+ int ieq_idx;
+
+ atomic_t free_clients_cnt;
+ atomic_t add_dev_cnt;
+ wait_queue_head_t waitq;
+};
+
+struct ism_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
+struct ism_client {
+ const char *name;
+ void (*add)(struct ism_dev *dev);
+ void (*remove)(struct ism_dev *dev);
+ void (*handle_event)(struct ism_dev *dev, struct ism_event *event);
+ /* Parameter dmbemask contains a bit vector with updated DMBEs, if sent
+ * via ism_move_data(). Callback function must handle all active bits
+ * indicated by dmbemask.
+ */
+ void (*handle_irq)(struct ism_dev *dev, unsigned int bit, u16 dmbemask);
+ /* Private area - don't touch! */
+ struct work_struct remove_work;
+ struct work_struct add_work;
+ struct ism_dev *tgt_ism;
+ u8 id;
+};
+
+int ism_register_client(struct ism_client *client);
+int ism_unregister_client(struct ism_client *client);
+static inline void *ism_get_priv(struct ism_dev *dev,
+ struct ism_client *client) {
+ return dev->priv[client->id];
+}
+
+static inline void ism_set_priv(struct ism_dev *dev, struct ism_client *client,
+ void *priv) {
+ dev->priv[client->id] = priv;
+}
+
+int ism_register_dmb(struct ism_dev *dev, struct ism_dmb *dmb,
+ struct ism_client *client);
+int ism_unregister_dmb(struct ism_dev *dev, struct ism_dmb *dmb);
+int ism_move(struct ism_dev *dev, u64 dmb_tok, unsigned int idx, bool sf,
+ unsigned int offset, void *data, unsigned int size);
+u8 *ism_get_seid(void);
+
+const struct smcd_ops *ism_get_smcd_ops(void);
+
+#endif /* _ISM_H */
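
A hedged sketch of how a consumer of this new ISM client API might register
itself and attach per-device private data; the client name, callbacks and
allocation size below are purely illustrative, not taken from any in-tree
user:

#include <linux/init.h>
#include <linux/ism.h>
#include <linux/slab.h>

static struct ism_client example_client;

static void example_add(struct ism_dev *dev)
{
	/* Per-device private data, retrievable later via ism_get_priv(). */
	ism_set_priv(dev, &example_client, kzalloc(64, GFP_KERNEL));
}

static void example_remove(struct ism_dev *dev)
{
	kfree(ism_get_priv(dev, &example_client));
}

static void example_event(struct ism_dev *dev, struct ism_event *event)
{
}

static void example_irq(struct ism_dev *dev, unsigned int bit, u16 dmbemask)
{
	/* Must handle every active bit in dmbemask, per the comment above. */
}

static struct ism_client example_client = {
	.name		= "example",
	.add		= example_add,
	.remove		= example_remove,
	.handle_event	= example_event,
	.handle_irq	= example_irq,
};

static int __init example_init(void)
{
	return ism_register_client(&example_client);
}
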
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 2170e0cc279d..5962072a4b19 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1570,8 +1570,6 @@ extern int jbd2_journal_inode_ranged_write(handle_t *handle,
extern int jbd2_journal_inode_ranged_wait(handle_t *handle,
struct jbd2_inode *inode, loff_t start_byte,
loff_t length);
-extern int jbd2_journal_submit_inode_data_buffers(
- struct jbd2_inode *jinode);
extern int jbd2_journal_finish_inode_data_buffers(
struct jbd2_inode *jinode);
extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 96c9d56e5510..f7ef70661ce2 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -96,15 +96,6 @@ static inline bool kasan_has_integrated_init(void)
}
#ifdef CONFIG_KASAN
-
-struct kasan_cache {
-#ifdef CONFIG_KASAN_GENERIC
- int alloc_meta_offset;
- int free_meta_offset;
-#endif
- bool is_kmalloc;
-};
-
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
@@ -120,19 +111,13 @@ static __always_inline void kasan_poison_pages(struct page *page,
__kasan_poison_pages(page, order, init);
}
-void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_unpoison_pages(struct page *page,
+bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline bool kasan_unpoison_pages(struct page *page,
unsigned int order, bool init)
{
if (kasan_enabled())
- __kasan_unpoison_pages(page, order, init);
-}
-
-void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
-static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
-{
- if (kasan_enabled())
- __kasan_cache_create_kmalloc(cache);
+ return __kasan_unpoison_pages(page, order, init);
+ return false;
}
void __kasan_poison_slab(struct slab *slab);
@@ -249,9 +234,11 @@ static __always_inline bool kasan_check_byte(const void *addr)
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
bool init) {}
-static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
- bool init) {}
-static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
+static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
+ bool init)
+{
+ return false;
+}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
@@ -302,6 +289,11 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#ifdef CONFIG_KASAN_GENERIC
+struct kasan_cache {
+ int alloc_meta_offset;
+ int free_meta_offset;
+};
+
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index fe6efb24d151..40bce7495af8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -297,7 +297,7 @@ bool mac_pton(const char *s, u8 *mac);
*
* Use tracing_on/tracing_off when you want to quickly turn on or off
* tracing. It simply enables or disables the recording of the trace events.
- * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
+ * This also corresponds to the user space /sys/kernel/tracing/tracing_on
* file, which gives a means for the kernel and userspace to interact.
* Place a tracing_off() in the kernel where you want tracing to end.
* From user space, examine the trace, and then echo 1 > tracing_on
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 5dd4343c1bbe..6883c5922701 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -403,7 +403,8 @@ extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
-extern int kexec_load_disabled;
+
+bool kexec_load_permitted(int kexec_image_type);
#ifndef kexec_flush_icache_page
#define kexec_flush_icache_page(page)
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 58a5b75612e3..bdab370a24f4 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -137,7 +137,7 @@ struct kobj_uevent_env {
struct kset_uevent_ops {
int (* const filter)(const struct kobject *kobj);
const char *(* const name)(const struct kobject *kobj);
- int (* const uevent)(struct kobject *kobj, struct kobj_uevent_env *env);
+ int (* const uevent)(const struct kobject *kobj, struct kobj_uevent_env *env);
};
struct kobj_attribute {
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index a0b92be98984..85a64cb95d75 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -378,6 +378,8 @@ extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
DEFINE_INSN_CACHE_OPS(optinsn);
extern void wait_for_kprobe_optimizer(void);
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
+bool kprobe_disarmed(struct kprobe *p);
#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4f26b244f6d0..8ada23756b0e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -324,7 +324,7 @@ struct kvm_vcpu {
#endif
int cpu;
int vcpu_id; /* id given by userspace at creation */
- int vcpu_idx; /* index in kvm->vcpus array */
+ int vcpu_idx; /* index into kvm->vcpu_array */
int ____srcu_idx; /* Don't use this directly. You've been warned. */
#ifdef CONFIG_PROVE_RCU
int srcu_depth;
@@ -956,8 +956,7 @@ static inline void kvm_irqfd_exit(void)
{
}
#endif
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- struct module *module);
+int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
@@ -1354,7 +1353,7 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
void kvm_flush_remote_tlbs(struct kvm *kvm);
@@ -1423,9 +1422,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
-int kvm_arch_init(void *opaque);
-void kvm_arch_exit(void);
-
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
@@ -1445,11 +1441,10 @@ void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_
static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
-int kvm_arch_hardware_setup(void *opaque);
-void kvm_arch_hardware_unsetup(void);
-int kvm_arch_check_processor_compat(void *opaque);
+#endif
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
@@ -1466,7 +1461,7 @@ int kvm_arch_create_vm_debugfs(struct kvm *kvm);
*/
static inline struct kvm *kvm_arch_alloc_vm(void)
{
- return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
}
#endif
@@ -2081,7 +2076,9 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
}
}
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
extern bool kvm_rebooting;
+#endif
extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 76de36e56cdf..2728d49bbdf6 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -40,7 +40,7 @@ typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;
-#define GPA_INVALID (~(gpa_t)0)
+#define INVALID_GPA (~(gpa_t)0)
typedef unsigned long hva_t;
typedef u64 hpa_t;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 31cb74b90ffc..d71201a968b6 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -10,17 +10,21 @@
#include <dt-bindings/leds/common.h>
#include <linux/device.h>
-#include <linux/kernfs.h>
-#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
-struct device;
-struct led_pattern;
+struct attribute_group;
struct device_node;
+struct fwnode_handle;
+struct gpio_desc;
+struct kernfs_node;
+struct led_pattern;
+struct platform_device;
+
/*
* LED Core
*/
@@ -78,6 +82,8 @@ struct led_init_data {
bool devname_mandatory;
};
+enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode);
+
struct led_hw_trigger_type {
int dummy;
};
@@ -529,7 +535,6 @@ struct led_properties {
const char *label;
};
-struct gpio_desc;
typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state,
unsigned long *delay_on,
unsigned long *delay_off);
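
led_init_default_state_get() centralizes parsing of the "default-state"
firmware property. A hedged sketch of how a LED driver's probe path might
use it (the function and the brightness handling are illustrative only):

#include <linux/leds.h>

/* Illustrative only: pick an initial brightness from the firmware node. */
static enum led_brightness
example_initial_brightness(struct fwnode_handle *fwnode,
			   struct led_classdev *cdev)
{
	switch (led_init_default_state_get(fwnode)) {
	case LEDS_DEFSTATE_ON:
		return cdev->max_brightness;
	case LEDS_DEFSTATE_KEEP:	/* real drivers read back the HW state */
	case LEDS_DEFSTATE_OFF:
	default:
		return LED_OFF;
	}
}
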
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c9149ebe7423..a759dfbdcc91 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -90,32 +90,32 @@ enum {
ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
ATA_DFLAG_AN = (1 << 7), /* AN configured */
ATA_DFLAG_TRUSTED = (1 << 8), /* device supports trusted send/recv */
+ ATA_DFLAG_FUA = (1 << 9), /* device supports FUA */
ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
- ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
+ ATA_DFLAG_NCQ_SEND_RECV = (1 << 11), /* device supports NCQ SEND and RECV */
+ ATA_DFLAG_NCQ_PRIO = (1 << 12), /* device supports NCQ priority */
+ ATA_DFLAG_CFG_MASK = (1 << 13) - 1,
- ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
- ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
+ ATA_DFLAG_PIO = (1 << 13), /* device limited to PIO mode */
+ ATA_DFLAG_NCQ_OFF = (1 << 14), /* device limited to non-NCQ mode */
ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
- ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
- ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */
- ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 21), /* Priority cmds sent to dev */
- ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
+ ATA_DFLAG_INIT_MASK = (1 << 19) - 1,
+ ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 19), /* Priority cmds sent to dev */
ATA_DFLAG_DETACH = (1 << 24),
ATA_DFLAG_DETACHED = (1 << 25),
-
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */
ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */
ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */
- ATA_DFLAG_FEATURES_MASK = ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
- ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
- ATA_DFLAG_NCQ_PRIO,
+ ATA_DFLAG_FEATURES_MASK = (ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
+ ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
+ ATA_DFLAG_NCQ_PRIO | ATA_DFLAG_FUA),
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
@@ -200,13 +200,14 @@ enum {
/* struct ata_queued_cmd flags */
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
+ ATA_QCFLAG_RTF_FILLED = (1 << 2), /* result TF has been filled */
ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */
ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */
- ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
+ ATA_QCFLAG_EH = (1 << 16), /* cmd aborted and owned by EH */
ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
@@ -381,6 +382,7 @@ enum {
ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
+ ATA_HORKAGE_NO_FUA = (1 << 30), /* Do not use FUA */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -876,7 +878,8 @@ struct ata_port_operations {
int (*check_atapi_dma)(struct ata_queued_cmd *qc);
enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
- bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ void (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ void (*qc_ncq_fill_rtf)(struct ata_port *ap, u64 done_mask);
/*
* Configuration and exception handling
@@ -1690,21 +1693,35 @@ extern struct ata_device *ata_dev_next(struct ata_device *dev,
(dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))
/**
- * ata_ncq_enabled - Test whether NCQ is enabled
- * @dev: ATA device to test for
+ * ata_ncq_supported - Test whether NCQ is supported
+ * @dev: ATA device to test
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
- * 1 if NCQ is enabled for @dev, 0 otherwise.
+ * true if @dev supports NCQ, false otherwise.
*/
-static inline int ata_ncq_enabled(struct ata_device *dev)
+static inline bool ata_ncq_supported(struct ata_device *dev)
{
if (!IS_ENABLED(CONFIG_SATA_HOST))
- return 0;
- return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
- ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+ return false;
+ return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+}
+
+/**
+ * ata_ncq_enabled - Test whether NCQ is enabled
+ * @dev: ATA device to test
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * true if NCQ is enabled for @dev, false otherwise.
+ */
+static inline bool ata_ncq_enabled(struct ata_device *dev)
+{
+ return ata_ncq_supported(dev) && !(dev->flags & ATA_DFLAG_NCQ_OFF);
}
static inline bool ata_fpdma_dsm_supported(struct ata_device *dev)
@@ -1756,7 +1773,7 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
return qc;
if ((qc->flags & (ATA_QCFLAG_ACTIVE |
- ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
+ ATA_QCFLAG_EH)) == ATA_QCFLAG_ACTIVE)
return qc;
return NULL;
@@ -1936,7 +1953,7 @@ extern void ata_sff_queue_delayed_work(struct delayed_work *dwork,
unsigned long delay);
extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
-extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
+extern void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index af38252ad704..e772aae71843 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -41,6 +41,9 @@ enum {
*/
NDD_INCOHERENT = 7,
+ /* dimm provider wants synchronous registration by __nvdimm_create() */
+ NDD_REGISTER_SYNC = 8,
+
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
ND_CMD_MAX_ELEM = 5,
diff --git a/include/linux/list.h b/include/linux/list.h
index 61762054b4be..f10344dbad4d 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -656,6 +656,21 @@ static inline void list_splice_tail_init(struct list_head *list,
pos = n, n = pos->prev)
/**
+ * list_count_nodes - count nodes in the list
+ * @head: the head for your list.
+ */
+static inline size_t list_count_nodes(struct list_head *head)
+{
+ struct list_head *pos;
+ size_t count = 0;
+
+ list_for_each(pos, head)
+ count++;
+
+ return count;
+}
+
+/**
* list_entry_is_head - test if the entry points to the head of the list
* @pos: the type * to cursor
* @head: the head for your list.
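
list_count_nodes() walks the whole list, so it is O(n). A short usage sketch
(the item struct and list are hypothetical):

#include <linux/list.h>
#include <linux/printk.h>

struct example_item {
	struct list_head node;
	int value;
};

static LIST_HEAD(example_list);

/* Illustrative only: report how many items are currently queued. */
static void example_report(void)
{
	pr_info("example_list holds %zu nodes\n",
		list_count_nodes(&example_list));
}
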
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 2b7f067af3c4..0168ac9fdda8 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -196,9 +196,9 @@ struct nlm_block {
* Global variables
*/
extern const struct rpc_program nlm_program;
-extern const struct svc_procedure nlmsvc_procedures[];
+extern const struct svc_procedure nlmsvc_procedures[24];
#ifdef CONFIG_LOCKD_V4
-extern const struct svc_procedure nlmsvc_procedures4[];
+extern const struct svc_procedure nlmsvc_procedures4[24];
#endif
extern int nlmsvc_grace_period;
extern unsigned long nlmsvc_timeout;
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index e594db58a0f1..1fadb5f5978b 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -12,7 +12,6 @@
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
/* #define CONFIG_MAPLE_RCU_DISABLED */
-/* #define CONFIG_DEBUG_MAPLE_TREE_VERBOSE */
/*
* Allocated nodes are mutable until they have been inserted into the tree,
@@ -433,6 +432,7 @@ struct ma_wr_state {
.min = 0, \
.max = ULONG_MAX, \
.alloc = NULL, \
+ .mas_flags = 0, \
}
#define MA_WR_STATE(name, ma_state, wr_entry) \
@@ -456,7 +456,7 @@ int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
void mas_store_prealloc(struct ma_state *mas, void *entry);
void *mas_find(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
-int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
+int mas_preallocate(struct ma_state *mas, gfp_t gfp);
bool mas_is_err(struct ma_state *mas);
bool mas_nomem(struct ma_state *mas, gfp_t gfp);
@@ -471,6 +471,16 @@ void *mas_next(struct ma_state *mas, unsigned long max);
int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
unsigned long size);
+static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
+ unsigned long addr)
+{
+ memset(mas, 0, sizeof(struct ma_state));
+ mas->tree = tree;
+ mas->index = mas->last = addr;
+ mas->max = ULONG_MAX;
+ mas->node = MAS_START;
+}
+
/* Checks if a mas has not found anything */
static inline bool mas_is_none(struct ma_state *mas)
{
@@ -483,9 +493,6 @@ static inline bool mas_is_paused(struct ma_state *mas)
return mas->node == MAS_PAUSE;
}
-void mas_dup_tree(struct ma_state *oldmas, struct ma_state *mas);
-void mas_dup_store(struct ma_state *mas, void *entry);
-
/*
* This finds an empty area from the highest address to the lowest.
* AKA "Topdown" version,
@@ -517,7 +524,6 @@ static inline void mas_reset(struct ma_state *mas)
* entry.
*
* Note: may return the zero entry.
- *
*/
#define mas_for_each(__mas, __entry, __max) \
while (((__entry) = mas_find((__mas), (__max))) != NULL)
@@ -639,7 +645,6 @@ static inline void mt_set_in_rcu(struct maple_tree *mt)
}
static inline unsigned int mt_height(const struct maple_tree *mt)
-
{
return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
}
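
mas_init() re-points an existing ma_state at a new index, as an alternative
to declaring a fresh MA_STATE(). A minimal sketch of reusing one state for
two lookups (the helper is illustrative; the caller is assumed to hold the
appropriate maple tree lock):

#include <linux/maple_tree.h>

/* Illustrative only: look up @a, and fall back to @b if nothing is there. */
static void *example_lookup_two(struct maple_tree *mt,
				unsigned long a, unsigned long b)
{
	MA_STATE(mas, mt, a, a);
	void *entry;

	entry = mas_walk(&mas);		/* first lookup at @a */
	if (entry)
		return entry;

	mas_init(&mas, mt, b);		/* reset the state to start at @b */
	return mas_walk(&mas);
}
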
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index f6efb16f9d1b..1e5893138afe 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -76,10 +76,7 @@ struct mcb_device {
struct device *dma_dev;
};
-static inline struct mcb_device *to_mcb_device(struct device *dev)
-{
- return container_of(dev, struct mcb_device, dev);
-}
+#define to_mcb_device(__dev) container_of_const(__dev, struct mcb_device, dev)
/**
* struct mcb_driver - MEN Chameleon Bus device driver
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 373630fe5c28..cffabdbce075 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -38,8 +38,10 @@ struct mdiobb_ctrl {
u8 op_c22_write;
};
-int mdiobb_read(struct mii_bus *bus, int phy, int reg);
-int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int mdiobb_read_c22(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val);
+int mdiobb_read_c45(struct mii_bus *bus, int devad, int phy, int reg);
+int mdiobb_write_c45(struct mii_bus *bus, int devad, int phy, int reg, u16 val);
/* The returned bus is not yet registered with the phy layer. */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index f7fbbf3069e7..27013d6bf24a 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -10,14 +10,6 @@
#include <linux/bitfield.h>
#include <linux/mod_devicetable.h>
-/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
- * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips.
- */
-#define MII_ADDR_C45 (1<<30)
-#define MII_DEVADDR_C45_SHIFT 16
-#define MII_DEVADDR_C45_MASK GENMASK(20, 16)
-#define MII_REGADDR_C45_MASK GENMASK(15, 0)
-
struct gpio_desc;
struct mii_bus;
struct reset_control;
@@ -410,6 +402,90 @@ static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
return result;
}
+/**
+ * mii_eee_cap1_mod_linkmode_t()
+ * @adv: target the linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates the value of the following registers to the
+ * linkmode:
+ * IEEE 802.3-2018 45.2.3.10 "EEE control and capability 1" register (3.20)
+ * IEEE 802.3-2018 45.2.7.13 "EEE advertisement 1" register (7.60)
+ * IEEE 802.3-2018 45.2.7.14 "EEE link partner ability 1" register (7.61)
+ */
+static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ adv, val & MDIO_EEE_100TX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ adv, val & MDIO_EEE_1000T);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ adv, val & MDIO_EEE_10GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, val & MDIO_EEE_1000KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, val & MDIO_EEE_10GKX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, val & MDIO_EEE_10GKR);
+}
+
+/**
+ * linkmode_to_mii_eee_cap1_t()
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates the linkmode to a value for the IEEE 802.3-2018
+ * 45.2.7.13 "EEE advertisement 1" register (7.60)
+ */
+static inline u32 linkmode_to_mii_eee_cap1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, adv))
+ result |= MDIO_EEE_100TX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_1000T;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_10GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, adv))
+ result |= MDIO_EEE_1000KX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, adv))
+ result |= MDIO_EEE_10GKX4;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, adv))
+ result |= MDIO_EEE_10GKR;
+
+ return result;
+}
+
+/**
+ * mii_10base_t1_adv_mod_linkmode_t()
+ * @adv: linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates IEEE 802.3cg-2019 45.2.7.26 "10BASE-T1 AN status"
+ * register (7.527) value to the linkmode.
+ */
+static inline void mii_10base_t1_adv_mod_linkmode_t(unsigned long *adv, u16 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ adv, val & MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L);
+}
+
+/**
+ * linkmode_adv_to_mii_10base_t1_t()
+ * @adv: linkmode advertisement settings
+ *
+ * A function that translates the linkmode to IEEE 802.3cg-2019 45.2.7.25
+ * "10BASE-T1 AN control" register (7.526) value.
+ */
+static inline u32 linkmode_adv_to_mii_10base_t1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, adv))
+ result |= MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L;
+
+ return result;
+}
+
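A minimal usage sketch of the EEE helpers added above, assuming a hypothetical clause-45 PHY driver; phy_read_mmd()/phy_write_mmd() and MDIO_AN_EEE_ADV are existing kernel APIs, everything else here is illustrative and not part of the patch:

#include <linux/mdio.h>
#include <linux/phy.h>

/* Illustrative only: convert between register 7.60 and a linkmode bitmap. */
static int example_get_eee_adv(struct phy_device *phydev, unsigned long *adv)
{
	int val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);

	if (val < 0)
		return val;

	mii_eee_cap1_mod_linkmode_t(adv, val);	/* register bits -> linkmode */
	return 0;
}

static int example_set_eee_adv(struct phy_device *phydev, unsigned long *adv)
{
	u32 val = linkmode_to_mii_eee_cap1_t(adv);	/* linkmode -> register bits */

	return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
}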
int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
@@ -423,6 +499,21 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
u16 set);
int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
u16 mask, u16 set);
+int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum);
+int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 val);
+int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 mask, u16 set);
+
+int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 mask, u16 set);
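With MII_ADDR_C45 removed, clause-45 accesses pass the device address (devad) as an explicit argument rather than encoding it into regnum. A hedged before/after sketch with made-up register values, not part of the patch:

#include <linux/mdio.h>

/* Old style (removed further below): devad folded into regnum, e.g.
 *	mdiobus_read(bus, addr, mdiobus_c45_addr(MDIO_MMD_VEND1, 0x10));
 * New style: dedicated C45 accessor with an explicit devad argument.
 */
static int example_read_vendor_reg(struct mii_bus *bus, int addr)
{
	return mdiobus_c45_read(bus, addr, MDIO_MMD_VEND1, 0x10);
}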
static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
{
@@ -448,44 +539,19 @@ static inline int mdiodev_modify_changed(struct mdio_device *mdiodev,
mask, set);
}
-static inline u32 mdiobus_c45_addr(int devad, u16 regnum)
-{
- return MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum;
-}
-
-static inline u16 mdiobus_c45_regad(u32 regnum)
-{
- return FIELD_GET(MII_REGADDR_C45_MASK, regnum);
-}
-
-static inline u16 mdiobus_c45_devad(u32 regnum)
+static inline int mdiodev_c45_modify(struct mdio_device *mdiodev, int devad,
+ u32 regnum, u16 mask, u16 set)
{
- return FIELD_GET(MII_DEVADDR_C45_MASK, regnum);
+ return mdiobus_c45_modify(mdiodev->bus, mdiodev->addr, devad, regnum,
+ mask, set);
}
-static inline int __mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
- u16 regnum)
-{
- return __mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
-}
-
-static inline int __mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad,
- u16 regnum, u16 val)
-{
- return __mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum),
- val);
-}
-
-static inline int mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
- u16 regnum)
-{
- return mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
-}
-
-static inline int mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad,
- u16 regnum, u16 val)
+static inline int mdiodev_c45_modify_changed(struct mdio_device *mdiodev,
+ int devad, u32 regnum, u16 mask,
+ u16 set)
{
- return mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum), val);
+ return mdiobus_c45_modify_changed(mdiodev->bus, mdiodev->addr, devad,
+ regnum, mask, set);
}
static inline int mdiodev_c45_read(struct mdio_device *mdiodev, int devad,
diff --git a/include/linux/mdio/mdio-mscc-miim.h b/include/linux/mdio/mdio-mscc-miim.h
index 5b4ed2c3cbb9..1ce699740af6 100644
--- a/include/linux/mdio/mdio-mscc-miim.h
+++ b/include/linux/mdio/mdio-mscc-miim.h
@@ -14,6 +14,6 @@
int mscc_miim_setup(struct device *device, struct mii_bus **bus,
const char *name, struct regmap *mii_regmap,
- int status_offset);
+ int status_offset, bool ignore_read_errors);
#endif
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 85dc9b88ea37..b6eda2ab205d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -466,34 +466,34 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
}
/*
- * page_memcg_check - get the memory cgroup associated with a page
- * @page: a pointer to the page struct
+ * folio_memcg_check - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function unlike page_memcg() can take any page
- * as an argument. It has to be used in cases when it's not known if a page
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function, unlike folio_memcg(), can take any folio
+ * as an argument. It has to be used in cases when it's not known if a folio
* has an associated memory cgroup pointer or an object cgroups vector or
* an object cgroup.
*
- * For a non-kmem page any of the following ensures page and memcg binding
+ * For a non-kmem folio any of the following ensures folio and memcg binding
* stability:
*
- * - the page lock
+ * - the folio lock
* - LRU isolation
- * - lock_page_memcg()
+ * - lock_folio_memcg()
* - exclusive reference
* - mem_cgroup_trylock_pages()
*
- * For a kmem page a caller should hold an rcu read lock to protect memcg
- * associated with a kmem page from being released.
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
*/
-static inline struct mem_cgroup *page_memcg_check(struct page *page)
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
/*
- * Because page->memcg_data might be changed asynchronously
- * for slab pages, READ_ONCE() should be used here.
+ * Because folio->memcg_data might be changed asynchronously
+ * for slabs, READ_ONCE() should be used here.
*/
- unsigned long memcg_data = READ_ONCE(page->memcg_data);
+ unsigned long memcg_data = READ_ONCE(folio->memcg_data);
if (memcg_data & MEMCG_DATA_OBJCGS)
return NULL;
@@ -508,6 +508,13 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+ if (PageTail(page))
+ return NULL;
+ return folio_memcg_check((struct folio *)page);
+}
+
static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
struct mem_cgroup *memcg;
@@ -794,6 +801,11 @@ static inline void obj_cgroup_put(struct obj_cgroup *objcg)
percpu_ref_put(&objcg->refcnt);
}
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget(&memcg->css);
+}
+
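mem_cgroup_tryget() above only succeeds while the css is still alive, and treats a NULL memcg as success, so callers do not need to special-case NULL. A minimal sketch of the acquire/release pattern (the caller is hypothetical, not from this patch):

#include <linux/memcontrol.h>

/* Illustrative only: pin a memcg across an operation that may sleep. */
static void example_use_memcg(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_tryget(memcg))
		return;			/* cgroup already released */

	/* ... operate on memcg ... */

	mem_cgroup_put(memcg);		/* also tolerates NULL */
}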
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
@@ -878,7 +890,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return match;
}
-struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
@@ -1165,6 +1177,11 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
return NULL;
}
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
+{
+ return NULL;
+}
+
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
return NULL;
@@ -1301,6 +1318,11 @@ static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
@@ -1754,24 +1776,30 @@ struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
-extern struct static_key_false memcg_kmem_enabled_key;
+extern struct static_key_false memcg_bpf_enabled_key;
+static inline bool memcg_bpf_enabled(void)
+{
+ return static_branch_likely(&memcg_bpf_enabled_key);
+}
+
+extern struct static_key_false memcg_kmem_online_key;
-static inline bool memcg_kmem_enabled(void)
+static inline bool memcg_kmem_online(void)
{
- return static_branch_likely(&memcg_kmem_enabled_key);
+ return static_branch_likely(&memcg_kmem_online_key);
}
static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
int order)
{
- if (memcg_kmem_enabled())
+ if (memcg_kmem_online())
return __memcg_kmem_charge_page(page, gfp, order);
return 0;
}
static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
- if (memcg_kmem_enabled())
+ if (memcg_kmem_online())
__memcg_kmem_uncharge_page(page, order);
}
@@ -1792,7 +1820,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
{
struct mem_cgroup *memcg;
- if (!memcg_kmem_enabled())
+ if (!memcg_kmem_online())
return;
rcu_read_lock();
@@ -1832,7 +1860,12 @@ static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
return NULL;
}
-static inline bool memcg_kmem_enabled(void)
+static inline bool memcg_bpf_enabled(void)
+{
+ return false;
+}
+
+static inline bool memcg_kmem_online(void)
{
return false;
}
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
index bf83363807ac..c01321467789 100644
--- a/include/linux/memregion.h
+++ b/include/linux/memregion.h
@@ -3,10 +3,12 @@
#define _MEMREGION_H_
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/range.h>
#include <linux/bug.h>
struct memregion_info {
int target_node;
+ struct range range;
};
#ifdef CONFIG_MEMREGION
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 7fcaf3180a5b..1314d9c5f05b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -135,7 +135,7 @@ struct dev_pagemap {
int nr_range;
union {
struct range range;
- struct range ranges[0];
+ DECLARE_FLEX_ARRAY(struct range, ranges);
};
};
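DECLARE_FLEX_ARRAY() is used here because a bare flexible array member is not allowed directly inside a union; the macro wraps it in an anonymous struct so the declaration compiles while keeping flexible-array semantics. A standalone sketch of the same pattern with a hypothetical structure name:

#include <linux/range.h>
#include <linux/stddef.h>

/* Illustrative only: mirrors the dev_pagemap layout above. */
struct example_map {
	int nr_range;
	union {
		struct range range;			  /* nr_range == 1 */
		DECLARE_FLEX_ARRAY(struct range, ranges); /* nr_range > 1 */
	};
};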
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 9ab0e2fca7ea..2058194807bd 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -432,8 +432,9 @@ enum {
AXP152_IRQ_PEK_SHORT,
AXP152_IRQ_PEK_LONG,
AXP152_IRQ_TIMER,
- AXP152_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP152_IRQ_PEK_FAL_EDGE,
+ AXP152_IRQ_PEK_RIS_EDGE,
AXP152_IRQ_GPIO3_INPUT,
AXP152_IRQ_GPIO2_INPUT,
AXP152_IRQ_GPIO1_INPUT,
@@ -472,8 +473,9 @@ enum {
AXP20X_IRQ_LOW_PWR_LVL1,
AXP20X_IRQ_LOW_PWR_LVL2,
AXP20X_IRQ_TIMER,
- AXP20X_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP20X_IRQ_PEK_FAL_EDGE,
+ AXP20X_IRQ_PEK_RIS_EDGE,
AXP20X_IRQ_GPIO3_INPUT,
AXP20X_IRQ_GPIO2_INPUT,
AXP20X_IRQ_GPIO1_INPUT,
@@ -502,8 +504,9 @@ enum axp22x_irqs {
AXP22X_IRQ_LOW_PWR_LVL1,
AXP22X_IRQ_LOW_PWR_LVL2,
AXP22X_IRQ_TIMER,
- AXP22X_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP22X_IRQ_PEK_FAL_EDGE,
+ AXP22X_IRQ_PEK_RIS_EDGE,
AXP22X_IRQ_GPIO1_INPUT,
AXP22X_IRQ_GPIO0_INPUT,
};
@@ -571,8 +574,9 @@ enum axp803_irqs {
AXP803_IRQ_LOW_PWR_LVL1,
AXP803_IRQ_LOW_PWR_LVL2,
AXP803_IRQ_TIMER,
- AXP803_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP803_IRQ_PEK_FAL_EDGE,
+ AXP803_IRQ_PEK_RIS_EDGE,
AXP803_IRQ_PEK_SHORT,
AXP803_IRQ_PEK_LONG,
AXP803_IRQ_PEK_OVER_OFF,
@@ -623,8 +627,9 @@ enum axp809_irqs {
AXP809_IRQ_LOW_PWR_LVL1,
AXP809_IRQ_LOW_PWR_LVL2,
AXP809_IRQ_TIMER,
- AXP809_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP809_IRQ_PEK_FAL_EDGE,
+ AXP809_IRQ_PEK_RIS_EDGE,
AXP809_IRQ_PEK_SHORT,
AXP809_IRQ_PEK_LONG,
AXP809_IRQ_PEK_OVER_OFF,
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 0bc7cba798a3..14ca7b471576 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -88,7 +88,7 @@ struct mfd_cell {
const char *of_compatible;
/*
- * Address as defined in Device Tree. Used to compement 'of_compatible'
+ * Address as defined in Device Tree. Used to complement 'of_compatible'
* (above) when matching OF nodes with devices that have identical
* compatible strings
*/
diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
index f0044b14136e..1812ebfa11a8 100644
--- a/include/linux/mfd/intel-m10-bmc.h
+++ b/include/linux/mfd/intel-m10-bmc.h
@@ -7,40 +7,43 @@
#ifndef __MFD_INTEL_M10_BMC_H
#define __MFD_INTEL_M10_BMC_H
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
#include <linux/regmap.h>
-#define M10BMC_LEGACY_BUILD_VER 0x300468
-#define M10BMC_SYS_BASE 0x300800
-#define M10BMC_SYS_END 0x300fff
-#define M10BMC_FLASH_BASE 0x10000000
-#define M10BMC_FLASH_END 0x1fffffff
-#define M10BMC_MEM_END M10BMC_FLASH_END
+#define M10BMC_N3000_LEGACY_BUILD_VER 0x300468
+#define M10BMC_N3000_SYS_BASE 0x300800
+#define M10BMC_N3000_SYS_END 0x300fff
+#define M10BMC_N3000_FLASH_BASE 0x10000000
+#define M10BMC_N3000_FLASH_END 0x1fffffff
+#define M10BMC_N3000_MEM_END M10BMC_N3000_FLASH_END
#define M10BMC_STAGING_BASE 0x18000000
#define M10BMC_STAGING_SIZE 0x3800000
/* Register offset of system registers */
-#define NIOS2_FW_VERSION 0x0
-#define M10BMC_MAC_LOW 0x10
-#define M10BMC_MAC_BYTE4 GENMASK(7, 0)
-#define M10BMC_MAC_BYTE3 GENMASK(15, 8)
-#define M10BMC_MAC_BYTE2 GENMASK(23, 16)
-#define M10BMC_MAC_BYTE1 GENMASK(31, 24)
-#define M10BMC_MAC_HIGH 0x14
-#define M10BMC_MAC_BYTE6 GENMASK(7, 0)
-#define M10BMC_MAC_BYTE5 GENMASK(15, 8)
-#define M10BMC_MAC_COUNT GENMASK(23, 16)
-#define M10BMC_TEST_REG 0x3c
-#define M10BMC_BUILD_VER 0x68
-#define M10BMC_VER_MAJOR_MSK GENMASK(23, 16)
-#define M10BMC_VER_PCB_INFO_MSK GENMASK(31, 24)
-#define M10BMC_VER_LEGACY_INVALID 0xffffffff
+#define NIOS2_N3000_FW_VERSION 0x0
+#define M10BMC_N3000_MAC_LOW 0x10
+#define M10BMC_N3000_MAC_BYTE4 GENMASK(7, 0)
+#define M10BMC_N3000_MAC_BYTE3 GENMASK(15, 8)
+#define M10BMC_N3000_MAC_BYTE2 GENMASK(23, 16)
+#define M10BMC_N3000_MAC_BYTE1 GENMASK(31, 24)
+#define M10BMC_N3000_MAC_HIGH 0x14
+#define M10BMC_N3000_MAC_BYTE6 GENMASK(7, 0)
+#define M10BMC_N3000_MAC_BYTE5 GENMASK(15, 8)
+#define M10BMC_N3000_MAC_COUNT GENMASK(23, 16)
+#define M10BMC_N3000_TEST_REG 0x3c
+#define M10BMC_N3000_BUILD_VER 0x68
+#define M10BMC_N3000_VER_MAJOR_MSK GENMASK(23, 16)
+#define M10BMC_N3000_VER_PCB_INFO_MSK GENMASK(31, 24)
+#define M10BMC_N3000_VER_LEGACY_INVALID 0xffffffff
/* Secure update doorbell register, in system register region */
-#define M10BMC_DOORBELL 0x400
+#define M10BMC_N3000_DOORBELL 0x400
/* Authorization Result register, in system register region */
-#define M10BMC_AUTH_RESULT 0x404
+#define M10BMC_N3000_AUTH_RESULT 0x404
/* Doorbell register fields */
#define DRBL_RSU_REQUEST BIT(0)
@@ -88,7 +91,6 @@
#define HOST_STATUS_ABORT_RSU 0x2
#define rsu_prog(doorbell) FIELD_GET(DRBL_RSU_PROGRESS, doorbell)
-#define rsu_stat(doorbell) FIELD_GET(DRBL_RSU_STATUS, doorbell)
/* interval 100ms and timeout 5s */
#define NIOS_HANDSHAKE_INTERVAL_US (100 * 1000)
@@ -103,29 +105,145 @@
#define RSU_COMPLETE_TIMEOUT_MS (40 * 60 * 1000)
/* Addresses for security related data in FLASH */
-#define BMC_REH_ADDR 0x17ffc004
-#define BMC_PROG_ADDR 0x17ffc000
-#define BMC_PROG_MAGIC 0x5746
+#define M10BMC_N3000_BMC_REH_ADDR 0x17ffc004
+#define M10BMC_N3000_BMC_PROG_ADDR 0x17ffc000
+#define M10BMC_N3000_BMC_PROG_MAGIC 0x5746
-#define SR_REH_ADDR 0x17ffd004
-#define SR_PROG_ADDR 0x17ffd000
-#define SR_PROG_MAGIC 0x5253
+#define M10BMC_N3000_SR_REH_ADDR 0x17ffd004
+#define M10BMC_N3000_SR_PROG_ADDR 0x17ffd000
+#define M10BMC_N3000_SR_PROG_MAGIC 0x5253
-#define PR_REH_ADDR 0x17ffe004
-#define PR_PROG_ADDR 0x17ffe000
-#define PR_PROG_MAGIC 0x5250
+#define M10BMC_N3000_PR_REH_ADDR 0x17ffe004
+#define M10BMC_N3000_PR_PROG_ADDR 0x17ffe000
+#define M10BMC_N3000_PR_PROG_MAGIC 0x5250
/* Address of 4KB inverted bit vector containing staging area FLASH count */
-#define STAGING_FLASH_COUNT 0x17ffb000
+#define M10BMC_N3000_STAGING_FLASH_COUNT 0x17ffb000
+
+#define M10BMC_N6000_INDIRECT_BASE 0x400
+
+#define M10BMC_N6000_SYS_BASE 0x0
+#define M10BMC_N6000_SYS_END 0xfff
+
+#define M10BMC_N6000_DOORBELL 0x1c0
+#define M10BMC_N6000_AUTH_RESULT 0x1c4
+#define AUTH_RESULT_RSU_STATUS GENMASK(23, 16)
+
+#define M10BMC_N6000_BUILD_VER 0x0
+#define NIOS2_N6000_FW_VERSION 0x4
+#define M10BMC_N6000_MAC_LOW 0x20
+#define M10BMC_N6000_MAC_HIGH (M10BMC_N6000_MAC_LOW + 4)
+
+/* Addresses for security related data in FLASH */
+#define M10BMC_N6000_BMC_REH_ADDR 0x7ffc004
+#define M10BMC_N6000_BMC_PROG_ADDR 0x7ffc000
+#define M10BMC_N6000_BMC_PROG_MAGIC 0x5746
+
+#define M10BMC_N6000_SR_REH_ADDR 0x7ffd004
+#define M10BMC_N6000_SR_PROG_ADDR 0x7ffd000
+#define M10BMC_N6000_SR_PROG_MAGIC 0x5253
+
+#define M10BMC_N6000_PR_REH_ADDR 0x7ffe004
+#define M10BMC_N6000_PR_PROG_ADDR 0x7ffe000
+#define M10BMC_N6000_PR_PROG_MAGIC 0x5250
+
+#define M10BMC_N6000_STAGING_FLASH_COUNT 0x7ff5000
+
+#define M10BMC_N6000_FLASH_MUX_CTRL 0x1d0
+#define M10BMC_N6000_FLASH_MUX_SELECTION GENMASK(2, 0)
+#define M10BMC_N6000_FLASH_MUX_IDLE 0
+#define M10BMC_N6000_FLASH_MUX_NIOS 1
+#define M10BMC_N6000_FLASH_MUX_HOST 2
+#define M10BMC_N6000_FLASH_MUX_PFL 4
+#define get_flash_mux(mux) FIELD_GET(M10BMC_N6000_FLASH_MUX_SELECTION, mux)
+
+#define M10BMC_N6000_FLASH_NIOS_REQUEST BIT(4)
+#define M10BMC_N6000_FLASH_HOST_REQUEST BIT(5)
+
+#define M10BMC_N6000_FLASH_CTRL 0x40
+#define M10BMC_N6000_FLASH_WR_MODE BIT(0)
+#define M10BMC_N6000_FLASH_RD_MODE BIT(1)
+#define M10BMC_N6000_FLASH_BUSY BIT(2)
+#define M10BMC_N6000_FLASH_FIFO_SPACE GENMASK(13, 4)
+#define M10BMC_N6000_FLASH_READ_COUNT GENMASK(25, 16)
+
+#define M10BMC_N6000_FLASH_ADDR 0x44
+#define M10BMC_N6000_FLASH_FIFO 0x800
+#define M10BMC_N6000_READ_BLOCK_SIZE 0x800
+#define M10BMC_N6000_FIFO_MAX_BYTES 0x800
+#define M10BMC_N6000_FIFO_WORD_SIZE 4
+#define M10BMC_N6000_FIFO_MAX_WORDS (M10BMC_N6000_FIFO_MAX_BYTES / \
+ M10BMC_N6000_FIFO_WORD_SIZE)
+
+#define M10BMC_FLASH_INT_US 1
+#define M10BMC_FLASH_TIMEOUT_US 10000
+
+/**
+ * struct m10bmc_csr_map - Intel MAX 10 BMC CSR register map
+ */
+struct m10bmc_csr_map {
+ unsigned int base;
+ unsigned int build_version;
+ unsigned int fw_version;
+ unsigned int mac_low;
+ unsigned int mac_high;
+ unsigned int doorbell;
+ unsigned int auth_result;
+ unsigned int bmc_prog_addr;
+ unsigned int bmc_reh_addr;
+ unsigned int bmc_magic;
+ unsigned int sr_prog_addr;
+ unsigned int sr_reh_addr;
+ unsigned int sr_magic;
+ unsigned int pr_prog_addr;
+ unsigned int pr_reh_addr;
+ unsigned int pr_magic;
+ unsigned int rsu_update_counter;
+};
+
+/**
+ * struct intel_m10bmc_platform_info - Intel MAX 10 BMC platform specific information
+ * @cells: MFD cells
+ * @n_cells: number of MFD cells (ARRAY_SIZE() of @cells)
+ * @csr_map: the mappings for register definition of MAX10 BMC
+ */
+struct intel_m10bmc_platform_info {
+ struct mfd_cell *cells;
+ int n_cells;
+ const struct m10bmc_csr_map *csr_map;
+};
+
+struct intel_m10bmc;
+
+/**
+ * struct intel_m10bmc_flash_bulk_ops - device specific operations for flash R/W
+ * @read: read a block of data from flash
+ * @write: write a block of data to flash
+ * @lock_write: locks flash access for erase+write
+ * @unlock_write: unlock flash access
+ *
+ * Write must be protected with @lock_write and @unlock_write. While the flash
+ * is locked, @read returns -EBUSY.
+ */
+struct intel_m10bmc_flash_bulk_ops {
+ int (*read)(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size);
+ int (*write)(struct intel_m10bmc *m10bmc, const u8 *buf, u32 offset, u32 size);
+ int (*lock_write)(struct intel_m10bmc *m10bmc);
+ void (*unlock_write)(struct intel_m10bmc *m10bmc);
+};
/**
* struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure
* @dev: this device
* @regmap: the regmap used to access registers by m10bmc itself
+ * @info: the platform information for MAX10 BMC
+ * @flash_bulk_ops: optional device specific operations for flash R/W
*/
struct intel_m10bmc {
struct device *dev;
struct regmap *regmap;
+ const struct intel_m10bmc_platform_info *info;
+ const struct intel_m10bmc_flash_bulk_ops *flash_bulk_ops;
};
/*
@@ -152,11 +270,22 @@ m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
* The base of the system registers could be configured by HW developers, and
* in HW SPEC, the base is not added to the addresses of the system registers.
*
- * This macro helps to simplify the accessing of the system registers. And if
+ * This function helps to simplify access to the system registers. If
 * the base is reconfigured in HW, SW developers could simply change the
- * M10BMC_SYS_BASE accordingly.
+ * csr_map's base accordingly.
+ */
+static inline int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset,
+ unsigned int *val)
+{
+ const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
+
+ return m10bmc_raw_read(m10bmc, csr_map->base + offset, val);
+}
+
+/*
+ * MAX10 BMC Core support
*/
-#define m10bmc_sys_read(m10bmc, offset, val) \
- m10bmc_raw_read(m10bmc, M10BMC_SYS_BASE + (offset), val)
+int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info);
+extern const struct attribute_group *m10bmc_dev_groups[];
#endif /* __MFD_INTEL_M10_BMC_H */
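With the register layout now described by the per-platform csr_map, sub-drivers read system registers through m10bmc_sys_read() using offsets taken from the map. A minimal sketch of a caller (the doorbell-reading function is hypothetical, not part of the patch):

#include <linux/mfd/intel-m10-bmc.h>

/* Illustrative only: read the doorbell register via the platform csr_map. */
static int example_read_doorbell(struct intel_m10bmc *m10bmc, unsigned int *doorbell)
{
	const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;

	return m10bmc_sys_read(m10bmc, csr_map->doorbell, doorbell);
}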
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 945bde1fe55c..9ba2c1a8d836 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -18,6 +18,7 @@ enum intel_cht_wc_models {
INTEL_CHT_WC_GPD_WIN_POCKET,
INTEL_CHT_WC_XIAOMI_MIPAD2,
INTEL_CHT_WC_LENOVO_YOGABOOK1,
+ INTEL_CHT_WC_LENOVO_YT3_X90,
};
/**
diff --git a/include/linux/mfd/ntxec.h b/include/linux/mfd/ntxec.h
index cc6f07bfa2b3..e5880c346da9 100644
--- a/include/linux/mfd/ntxec.h
+++ b/include/linux/mfd/ntxec.h
@@ -34,5 +34,5 @@ static inline u16 ntxec_reg8(u8 value)
/* Known firmware versions */
#define NTXEC_VERSION_KOBO_AURA 0xd726 /* found in Kobo Aura */
#define NTXEC_VERSION_TOLINO_SHINE2 0xf110 /* found in Tolino Shine 2 HD */
-
+#define NTXEC_VERSION_TOLINO_VISION 0xe135 /* found in Tolino Vision, contains RTC, ADC, PWM, home pad */
#endif
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index d4b5e527a7a3..09c6b3184bb0 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -451,8 +451,10 @@
#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0)
/* For imx6ul iomux gpr register field define */
-#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
-#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
+#define IMX6UL_GPR1_ENET2_TX_CLK_DIR BIT(18)
+#define IMX6UL_GPR1_ENET1_TX_CLK_DIR BIT(17)
+#define IMX6UL_GPR1_ENET2_CLK_SEL BIT(14)
+#define IMX6UL_GPR1_ENET1_CLK_SEL BIT(13)
#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17)
#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index eaa233038254..6e3d99b7a0ee 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -69,6 +69,8 @@ enum twl6030_module_ids {
TWL6030_MODULE_GPADC,
TWL6030_MODULE_GASGAUGE,
+ /* A few extra registers before the registers shared with the 6030 */
+ TWL6032_MODULE_CHARGE,
TWL6030_MODULE_LAST,
};
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index 478aece17046..f198a8ac7ee7 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -70,8 +70,8 @@ struct mhi_ep_db_info {
* @cmd_ctx_cache_phys: Physical address of the host command context cache
* @chdb: Array of channel doorbell interrupt info
* @event_lock: Lock for protecting event rings
- * @list_lock: Lock for protecting state transition and channel doorbell lists
* @state_lock: Lock for protecting state transitions
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
* @st_transition_list: List of state transitions
* @ch_db_list: List of queued channel doorbells
* @wq: Dedicated workqueue for handling rings and state changes
@@ -117,8 +117,8 @@ struct mhi_ep_cntrl {
struct mhi_ep_db_info chdb[4];
struct mutex event_lock;
+ struct mutex state_lock;
spinlock_t list_lock;
- spinlock_t state_lock;
struct list_head st_transition_list;
struct list_head ch_db_list;
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 1f7c33b2f5a3..8bef1ab62bba 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -8,6 +8,8 @@
#ifndef _MICREL_PHY_H
#define _MICREL_PHY_H
+#define MICREL_OUI 0x0885
+
#define MICREL_PHY_ID_MASK 0x00fffff0
#define PHY_ID_KSZ8873MLL 0x000e7237
@@ -29,6 +31,7 @@
#define PHY_ID_KSZ9131 0x00221640
#define PHY_ID_LAN8814 0x00221660
#define PHY_ID_LAN8804 0x00221670
+#define PHY_ID_LAN8841 0x00221650
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 3ef77f52a4f0..6241a1596a75 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -18,6 +18,7 @@ struct migration_target_control;
* - zero on page migration success;
*/
#define MIGRATEPAGE_SUCCESS 0
+#define MIGRATEPAGE_UNMAP 1
/**
* struct movable_operations - Driver page migration
@@ -61,16 +62,16 @@ extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
-extern void putback_movable_pages(struct list_head *l);
+void putback_movable_pages(struct list_head *l);
int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode);
-extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
- unsigned long private, enum migrate_mode mode, int reason,
- unsigned int *ret_succeeded);
-extern struct page *alloc_migration_target(struct page *page, unsigned long private);
-extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
+int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
+ unsigned long private, enum migrate_mode mode, int reason,
+ unsigned int *ret_succeeded);
+struct page *alloc_migration_target(struct page *page, unsigned long private);
+bool isolate_movable_page(struct page *page, isolate_mode_t mode);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
@@ -91,8 +92,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
static inline struct page *alloc_migration_target(struct page *page,
unsigned long private)
{ return NULL; }
-static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
- { return -EBUSY; }
+static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
@@ -123,6 +124,15 @@ static inline bool folio_test_movable(struct folio *folio)
}
static inline
+const struct movable_operations *folio_movable_ops(struct folio *folio)
+{
+ VM_BUG_ON(!__folio_test_movable(folio));
+
+ return (const struct movable_operations *)
+ ((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE);
+}
+
+static inline
const struct movable_operations *page_movable_ops(struct page *page)
{
VM_BUG_ON(!__PageMovable(page));
@@ -132,8 +142,8 @@ const struct movable_operations *page_movable_ops(struct page *page)
}
#ifdef CONFIG_NUMA_BALANCING
-extern int migrate_misplaced_page(struct page *page,
- struct vm_area_struct *vma, int node);
+int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+ int node);
#else
static inline int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node)
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 9db93e487496..b9a7b1319f5d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -446,6 +446,7 @@ enum {
struct mlx4_wqe_inline_seg {
__be32 byte_count;
+ __u8 data[];
};
enum mlx4_update_qp_attr {
@@ -503,4 +504,5 @@ static inline u16 folded_qp(u32 q)
u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn);
+void mlx4_put_qp(struct mlx4_qp *qp);
#endif /* MLX4_QP_H */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 29d4b201c7b2..71b06ebad402 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -362,11 +362,13 @@ enum mlx5_event {
enum mlx5_driver_event {
MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
+ MLX5_DRIVER_EVENT_UPLINK_NETDEV,
};
enum {
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
+ MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE = 0x2,
};
enum {
@@ -1204,6 +1206,7 @@ enum mlx5_cap_type {
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
+ MLX5_CAP_CRYPTO = 0x1a,
MLX5_CAP_DEV_SHAMPO = 0x1d,
MLX5_CAP_MACSEC = 0x1f,
MLX5_CAP_GENERAL_2 = 0x20,
@@ -1460,6 +1463,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_IPSEC(mdev, cap)\
MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
+#define MLX5_CAP_CRYPTO(mdev, cap)\
+ MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
+
#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 333c1fec72f8..f33389b42209 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -49,6 +49,7 @@
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>
+#include <linux/mutex.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
@@ -100,6 +101,8 @@ enum {
};
enum {
+ MLX5_REG_SBPR = 0xb001,
+ MLX5_REG_SBCM = 0xb002,
MLX5_REG_QPTS = 0x4002,
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
@@ -214,6 +217,7 @@ struct mlx5_rsc_debug {
enum mlx5_dev_event {
MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
MLX5_DEV_EVENT_PORT_AFFINITY = 129,
+ MLX5_DEV_EVENT_MULTIPORT_ESW = 130,
};
enum mlx5_port_status {
@@ -308,6 +312,7 @@ struct mlx5_cmd {
struct workqueue_struct *wq;
struct semaphore sem;
struct semaphore pages_sem;
+ struct semaphore throttle_sem;
int mode;
u16 allowed_opcode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
@@ -427,8 +432,6 @@ struct mlx5_core_health {
u8 synd;
u32 fatal_error;
u32 crdump_size;
- /* wq spinlock to synchronize draining */
- spinlock_t wq_lock;
struct workqueue_struct *wq;
unsigned long flags;
struct work_struct fatal_report_work;
@@ -513,6 +516,7 @@ struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;
+struct mlx5_crypto_dek_priv;
struct mlx5_rate_limit {
u32 rate;
@@ -551,10 +555,6 @@ enum {
* creation/deletion on drivers rescan. Unset during device attach.
*/
MLX5_PRIV_FLAGS_DETACH = 1 << 2,
- /* Distinguish between mlx5e_probe/remove called by module init/cleanup
- * and called by other flows which can already hold devlink lock
- */
- MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW = 1 << 3,
};
struct mlx5_adev {
@@ -679,8 +679,9 @@ struct mlx5e_resources {
u32 mkey;
struct mlx5_sq_bfreg bfreg;
} hw_objs;
- struct devlink_port dl_port;
struct net_device *uplink_netdev;
+ struct mutex uplink_netdev_lock;
+ struct mlx5_crypto_dek_priv *dek_priv;
};
enum mlx5_sw_icm_type {
@@ -741,7 +742,6 @@ enum {
enum {
MKEY_CACHE_LAST_STD_ENTRY = 20,
- MLX5_IMR_MTT_CACHE_ENTRY,
MLX5_IMR_KSM_CACHE_ENTRY,
MAX_MKEY_CACHE_ENTRIES
};
@@ -1018,6 +1018,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
+void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
+void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
+
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
@@ -1157,6 +1160,7 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave);
@@ -1207,6 +1211,11 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_VF;
}
+static inline bool mlx5_core_is_management_pf(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, num_ports) == 1 && !MLX5_CAP_GEN(dev, native_port_num);
+}
+
static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index ba6958b49a8e..2cb404c7ea13 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -51,6 +51,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_COUNTER,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
MLX5_FLOW_DESTINATION_TYPE_RANGE,
+ MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
};
enum {
@@ -102,6 +103,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_PORT_SEL,
MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
};
enum {
@@ -296,6 +299,8 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
+void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes);
u32 mlx5_fc_id(struct mlx5_fc *counter);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a9ee7bc59c90..66d76e97a087 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -306,6 +306,7 @@ enum {
MLX5_CMD_OP_SYNC_STEERING = 0xb00,
MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
+ MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
MLX5_CMD_OP_MAX
};
@@ -315,6 +316,11 @@ enum {
MLX5_CMD_OP_GENERAL_END = 0xd00,
};
+enum {
+ MLX5_FT_NIC_RX_2_NIC_RX_RDMA = BIT(0),
+ MLX5_FT_NIC_TX_RDMA_2_NIC_TX = BIT(1),
+};
+
struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_dmac[0x1];
u8 outer_smac[0x1];
@@ -1112,6 +1118,30 @@ struct mlx5_ifc_sync_steering_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_sync_crypto_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 crypto_type[0x10];
+
+ u8 reserved_at_80[0x80];
+};
+
+struct mlx5_ifc_sync_crypto_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_device_mem_cap_bits {
u8 memic[0x1];
u8 reserved_at_1[0x1f];
@@ -1480,7 +1510,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 relaxed_ordering_write[0x1];
u8 relaxed_ordering_read[0x1];
u8 log_max_mkey[0x6];
- u8 reserved_at_f0[0x8];
+ u8 reserved_at_f0[0x6];
+ u8 terminate_scatter_list_mkey[0x1];
+ u8 repeated_mkey[0x1];
u8 dump_fill_mkey[0x1];
u8 reserved_at_f9[0x2];
u8 fast_teardown[0x1];
@@ -1496,7 +1528,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 null_mkey[0x1];
u8 log_max_klm_list_size[0x6];
- u8 reserved_at_120[0xa];
+ u8 reserved_at_120[0x2];
+ u8 qpc_extension[0x1];
+ u8 reserved_at_123[0x7];
u8 log_max_ra_req_dc[0x6];
u8 reserved_at_130[0x2];
u8 eth_wqe_too_small[0x1];
@@ -1662,7 +1696,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0x6];
+ u8 reserved_at_270[0x3];
+ u8 qp_error_syndrome[0x1];
+ u8 reserved_at_274[0x2];
u8 lag_dct[0x2];
u8 lag_tx_port_affinity[0x1];
u8 lag_native_fdb_selection[0x1];
@@ -1768,7 +1804,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ats[0x1];
u8 reserved_at_462[0x1];
u8 log_max_uctx[0x5];
- u8 reserved_at_468[0x2];
+ u8 reserved_at_468[0x1];
+ u8 crypto[0x1];
u8 ipsec_offload[0x1];
u8 log_max_umem[0x5];
u8 max_num_eqs[0x10];
@@ -1899,7 +1936,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_e0[0xc0];
- u8 reserved_at_1a0[0xb];
+ u8 flow_table_type_2_type[0x8];
+ u8 reserved_at_1a8[0x3];
u8 log_min_mkey_entity_size[0x5];
u8 reserved_at_1b0[0x10];
@@ -1923,6 +1961,7 @@ enum mlx5_ifc_flow_destination_type {
MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
};
enum mlx5_flow_table_miss_action {
@@ -1937,7 +1976,8 @@ struct mlx5_ifc_dest_format_struct_bits {
u8 destination_eswitch_owner_vhca_id_valid[0x1];
u8 packet_reformat[0x1];
- u8 reserved_at_22[0xe];
+ u8 reserved_at_22[0x6];
+ u8 destination_table_type[0x8];
u8 destination_eswitch_owner_vhca_id[0x10];
};
@@ -2145,6 +2185,17 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
u8 reserved_at_360[0x4a0];
};
+struct mlx5_ifc_cong_control_r_roce_general_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 reserved_at_80[0x10];
+ u8 rtt_resp_dscp_valid[0x1];
+ u8 reserved_at_91[0x9];
+ u8 rtt_resp_dscp[0x6];
+
+ u8 reserved_at_a0[0x760];
+};
+
struct mlx5_ifc_cong_control_802_1qau_rp_bits {
u8 reserved_at_0[0x80];
@@ -3351,6 +3402,30 @@ struct mlx5_ifc_shampo_cap_bits {
u8 reserved_at_40[0x7c0];
};
+struct mlx5_ifc_crypto_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 synchronize_dek[0x1];
+ u8 int_kek_manual[0x1];
+ u8 int_kek_auto[0x1];
+ u8 reserved_at_6[0x1a];
+
+ u8 reserved_at_20[0x3];
+ u8 log_dek_max_alloc[0x5];
+ u8 reserved_at_28[0x3];
+ u8 log_max_num_deks[0x5];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x3];
+ u8 log_dek_granularity[0x5];
+ u8 reserved_at_68[0x3];
+ u8 log_max_num_int_kek[0x5];
+ u8 sw_wrapped_dek[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
@@ -3371,6 +3446,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
struct mlx5_ifc_shampo_cap_bits shampo_cap;
struct mlx5_ifc_macsec_cap_bits macsec_cap;
+ struct mlx5_ifc_crypto_cap_bits crypto_cap;
u8 reserved_at_0[0x8000];
};
@@ -4290,6 +4366,7 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits {
struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+ struct mlx5_ifc_cong_control_r_roce_general_bits cong_control_r_roce_general;
u8 reserved_at_0[0x800];
};
@@ -5198,7 +5275,11 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 null_mkey[0x20];
- u8 reserved_at_a0[0x60];
+ u8 terminate_scatter_list_mkey[0x20];
+
+ u8 repeated_mkey[0x20];
+
+ u8 reserved_at_a0[0x20];
};
struct mlx5_ifc_query_special_contexts_in_bits {
@@ -5342,6 +5423,37 @@ struct mlx5_ifc_query_rmp_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_cqe_error_syndrome_bits {
+ u8 hw_error_syndrome[0x8];
+ u8 hw_syndrome_type[0x4];
+ u8 reserved_at_c[0x4];
+ u8 vendor_error_syndrome[0x8];
+ u8 syndrome[0x8];
+};
+
+struct mlx5_ifc_qp_context_extension_bits {
+ u8 reserved_at_0[0x60];
+
+ struct mlx5_ifc_cqe_error_syndrome_bits error_syndrome;
+
+ u8 reserved_at_80[0x580];
+};
+
+struct mlx5_ifc_qpc_extension_and_pas_list_in_bits {
+ struct mlx5_ifc_qp_context_extension_bits qpc_data_extension;
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_qp_pas_list_in_bits {
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits {
+ struct mlx5_ifc_qp_pas_list_in_bits qp_pas_list;
+ struct mlx5_ifc_qpc_extension_and_pas_list_in_bits qpc_ext_and_pas_list;
+};
+
struct mlx5_ifc_query_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5358,7 +5470,7 @@ struct mlx5_ifc_query_qp_out_bits {
u8 reserved_at_800[0x80];
- u8 pas[][0x40];
+ union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits qp_pas_or_qpc_ext_and_pas;
};
struct mlx5_ifc_query_qp_in_bits {
@@ -5368,7 +5480,8 @@ struct mlx5_ifc_query_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
u8 qpn[0x18];
u8 reserved_at_60[0x20];
@@ -6196,6 +6309,18 @@ struct mlx5_ifc_match_definer_bits {
};
};
+struct mlx5_ifc_general_obj_create_param_bits {
+ u8 alias_object[0x1];
+ u8 reserved_at_1[0x2];
+ u8 log_obj_range[0x5];
+ u8 reserved_at_8[0x18];
+};
+
+struct mlx5_ifc_general_obj_query_param_bits {
+ u8 alias_object[0x1];
+ u8 obj_offset[0x1f];
+};
+
struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
u8 opcode[0x10];
u8 uid[0x10];
@@ -6205,9 +6330,10 @@ struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
u8 obj_id[0x20];
- u8 reserved_at_60[0x3];
- u8 log_obj_range[0x5];
- u8 reserved_at_68[0x18];
+ union {
+ struct mlx5_ifc_general_obj_create_param_bits create;
+ struct mlx5_ifc_general_obj_query_param_bits query;
+ } op_param;
};
struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
@@ -8571,7 +8697,8 @@ struct mlx5_ifc_create_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
u8 input_qpn[0x18];
u8 reserved_at_60[0x20];
@@ -9862,13 +9989,20 @@ struct mlx5_ifc_mpegc_reg_bits {
};
enum {
+ MLX5_MTUTC_FREQ_ADJ_UNITS_PPB = 0x0,
+ MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM = 0x1,
+};
+
+enum {
MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1,
MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2,
MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3,
};
struct mlx5_ifc_mtutc_reg_bits {
- u8 reserved_at_0[0x1c];
+ u8 reserved_at_0[0x5];
+ u8 freq_adj_units[0x3];
+ u8 reserved_at_8[0x14];
u8 operation[0x4];
u8 freq_adjustment[0x20];
@@ -9941,7 +10075,10 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x5d];
+ u8 reserved_at_0[0x50];
+ u8 mtutc_freq_adj_units[0x1];
+ u8 mtutc_time_adjustment_extended_range[0x1];
+ u8 reserved_at_52[0xb];
u8 mcia_32dwords[0x1];
u8 out_pulse_duration_ns[0x1];
u8 npps_period[0x1];
@@ -11000,6 +11137,67 @@ struct mlx5_ifc_pbmc_reg_bits {
u8 reserved_at_2e0[0x80];
};
+struct mlx5_ifc_sbpr_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x4];
+ u8 dir[0x2];
+ u8 reserved_at_8[0x14];
+ u8 pool[0x4];
+
+ u8 infi_size[0x1];
+ u8 reserved_at_21[0x7];
+ u8 size[0x18];
+
+ u8 reserved_at_40[0x1c];
+ u8 mode[0x4];
+
+ u8 reserved_at_60[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_81[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 ext_buff_occupancy[0x18];
+};
+
+struct mlx5_ifc_sbcm_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x6];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 pg_buff[0x6];
+ u8 reserved_at_18[0x6];
+ u8 dir[0x2];
+
+ u8 reserved_at_20[0x1f];
+ u8 exc[0x1];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_a1[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_c0[0x8];
+ u8 min_buff[0x18];
+
+ u8 infi_max[0x1];
+ u8 reserved_at_e1[0x7];
+ u8 max_buff[0x18];
+
+ u8 reserved_at_100[0x20];
+
+ u8 reserved_at_120[0x1c];
+ u8 pool[0x4];
+};
+
struct mlx5_ifc_qtct_reg_bits {
u8 reserved_at_0[0x8];
u8 port_number[0x8];
@@ -11639,6 +11837,7 @@ enum {
MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
+ MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
};
enum {
@@ -11818,21 +12017,62 @@ struct mlx5_ifc_query_macsec_obj_out_bits {
struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
};
+struct mlx5_ifc_wrapped_dek_bits {
+ u8 gcm_iv[0x60];
+
+ u8 reserved_at_60[0x20];
+
+ u8 const0[0x1];
+ u8 key_size[0x1];
+ u8 reserved_at_82[0x2];
+ u8 key2_invalid[0x1];
+ u8 reserved_at_85[0x3];
+ u8 pd[0x18];
+
+ u8 key_purpose[0x5];
+ u8 reserved_at_a5[0x13];
+ u8 kek_id[0x8];
+
+ u8 reserved_at_c0[0x40];
+
+ u8 key1[0x8][0x20];
+
+ u8 key2[0x8][0x20];
+
+ u8 reserved_at_300[0x40];
+
+ u8 const1[0x1];
+ u8 reserved_at_341[0x1f];
+
+ u8 reserved_at_360[0x20];
+
+ u8 auth_tag[0x80];
+};
+
struct mlx5_ifc_encryption_key_obj_bits {
u8 modify_field_select[0x40];
- u8 reserved_at_40[0x14];
+ u8 state[0x8];
+ u8 sw_wrapped[0x1];
+ u8 reserved_at_49[0xb];
u8 key_size[0x4];
u8 reserved_at_58[0x4];
- u8 key_type[0x4];
+ u8 key_purpose[0x4];
u8 reserved_at_60[0x8];
u8 pd[0x18];
- u8 reserved_at_80[0x180];
- u8 key[8][0x20];
+ u8 reserved_at_80[0x100];
+
+ u8 opaque[0x40];
+
+ u8 reserved_at_1c0[0x40];
+
+ u8 key[8][0x80];
+
+ u8 sw_wrapped_dek[8][0x80];
- u8 reserved_at_300[0x500];
+ u8 reserved_at_a00[0x600];
};
struct mlx5_ifc_create_encryption_key_in_bits {
@@ -11840,6 +12080,11 @@ struct mlx5_ifc_create_encryption_key_in_bits {
struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
};
+struct mlx5_ifc_modify_encryption_key_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
+};
+
enum {
MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH = 0x0,
MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2 = 0x1,
@@ -11895,6 +12140,34 @@ struct mlx5_ifc_create_flow_meter_aso_obj_in_bits {
struct mlx5_ifc_flow_meter_aso_obj_bits flow_meter_aso_obj;
};
+struct mlx5_ifc_int_kek_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 state[0x8];
+ u8 auto_gen[0x1];
+ u8 reserved_at_49[0xb];
+ u8 key_size[0x4];
+ u8 reserved_at_58[0x8];
+
+ u8 reserved_at_60[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_80[0x180];
+ u8 key[8][0x80];
+
+ u8 reserved_at_600[0x200];
+};
+
+struct mlx5_ifc_create_int_kek_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
+struct mlx5_ifc_create_int_kek_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
struct mlx5_ifc_sampler_obj_bits {
u8 modify_field_select[0x40];
@@ -11933,9 +12206,9 @@ enum {
};
enum {
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC = 0x4,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC = 0x4,
};
struct mlx5_ifc_tls_static_params_bits {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 4657d5c54abe..df55fbb65717 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -36,7 +36,7 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
-#define MLX5_INVALID_LKEY 0x100
+#define MLX5_TERMINATE_SCATTER_LIST_LKEY cpu_to_be32(0x100)
/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8)
#define MLX5_DIF_SIZE 8
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 716d30d93616..1f79667824eb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -282,7 +282,12 @@ extern unsigned int kobjsize(const void *objp);
#define VM_MAYSHARE 0x00000080
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#ifdef CONFIG_MMU
#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
+#else /* CONFIG_MMU */
+#define VM_MAYOVERLAY 0x00000200 /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
+#define VM_UFFD_MISSING 0
+#endif /* CONFIG_MMU */
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
@@ -422,8 +427,8 @@ extern unsigned int kobjsize(const void *objp);
/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
-/* This mask is used to clear all the VMA flags used by mlock */
-#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
+/* This mask represents all the VMA flag bits used by mlock */
+#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
@@ -628,6 +633,63 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
INIT_LIST_HEAD(&vma->anon_vma_chain);
}
+/* Use when VMA is not part of the VMA tree and needs no locking */
+static inline void vm_flags_init(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ ACCESS_PRIVATE(vma, __vm_flags) = flags;
+}
+
+/* Use when VMA is part of the VMA tree and modifications need coordination */
+static inline void vm_flags_reset(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ mmap_assert_write_locked(vma->vm_mm);
+ vm_flags_init(vma, flags);
+}
+
+static inline void vm_flags_reset_once(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ mmap_assert_write_locked(vma->vm_mm);
+ WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
+}
+
+static inline void vm_flags_set(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ mmap_assert_write_locked(vma->vm_mm);
+ ACCESS_PRIVATE(vma, __vm_flags) |= flags;
+}
+
+static inline void vm_flags_clear(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ mmap_assert_write_locked(vma->vm_mm);
+ ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
+}
+
+/*
+ * Use only if VMA is not part of the VMA tree or has no other users and
+ * therefore needs no locking.
+ */
+static inline void __vm_flags_mod(struct vm_area_struct *vma,
+ vm_flags_t set, vm_flags_t clear)
+{
+ vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
+}
+
+/*
+ * Use only when the order of set/clear operations is unimportant, otherwise
+ * use vm_flags_{set|clear} explicitly.
+ */
+static inline void vm_flags_mod(struct vm_area_struct *vma,
+ vm_flags_t set, vm_flags_t clear)
+{
+ mmap_assert_write_locked(vma->vm_mm);
+ __vm_flags_mod(vma, set, clear);
+}
+
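The vm_flags helpers above replace direct writes to vma->vm_flags: vm_flags_init() is for a detached VMA, while the locked variants assert that the mmap write lock is held. A minimal sketch of a typical call site, a hypothetical driver ->mmap handler (which already runs under the mmap write lock), not part of the patch:

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative only: ->mmap handlers may use vm_flags_set() directly. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}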
static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
vma->vm_ops = NULL;
@@ -671,16 +733,16 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
- return mas_find(&vmi->mas, max);
+ return mas_find(&vmi->mas, max - 1);
}
static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
/*
- * Uses vma_find() to get the first VMA when the iterator starts.
+ * Uses mas_find() to get the first VMA when the iterator starts.
* Calling mas_next() could skip the first entry.
*/
- return vma_find(vmi, ULONG_MAX);
+ return mas_find(&vmi->mas, ULONG_MAX);
}
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
@@ -693,12 +755,50 @@ static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
return vmi->mas.index;
}
+static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
+{
+ return vmi->mas.last + 1;
+}
+static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
+ unsigned long count)
+{
+ return mas_expected_entries(&vmi->mas, count);
+}
+
+/* Free any unused preallocations */
+static inline void vma_iter_free(struct vma_iterator *vmi)
+{
+ mas_destroy(&vmi->mas);
+}
+
+static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+ vmi->mas.index = vma->vm_start;
+ vmi->mas.last = vma->vm_end - 1;
+ mas_store(&vmi->mas, vma);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void vma_iter_invalidate(struct vma_iterator *vmi)
+{
+ mas_pause(&vmi->mas);
+}
+
+static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
+{
+ mas_set(&vmi->mas, addr);
+}
+
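vma_iter_set() repositions the iterator, and for_each_vma_range() now takes the exclusive end directly since vma_find() subtracts one internally. A hedged sketch of walking every VMA intersecting [start, end), assuming a hypothetical caller:

#include <linux/mm.h>

/* Illustrative only: iterate VMAs in a range under the mmap read lock. */
static void example_walk(struct mm_struct *mm, unsigned long start,
			 unsigned long end)
{
	VMA_ITERATOR(vmi, mm, start);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, end) {
		/* ... inspect vma ... */
	}
	mmap_read_unlock(mm);
}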
#define for_each_vma(__vmi, __vma) \
while (((__vma) = vma_next(&(__vmi))) != NULL)
/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end) \
- while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
+ while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
#ifdef CONFIG_SHMEM
/*
@@ -720,11 +820,20 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
+/*
+ * compound_order() can be called without holding a reference, which means
+ * that niceties like page_folio() don't work. These callers should be
+ * prepared to handle wild return values. For example, PG_head may be
+ * set before _folio_order is initialised, or this may be a tail page.
+ * See compaction.c for some good examples.
+ */
static inline unsigned int compound_order(struct page *page)
{
- if (!PageHead(page))
+ struct folio *folio = (struct folio *)page;
+
+ if (!test_bit(PG_head, &folio->flags))
return 0;
- return page[1].compound_order;
+ return folio->_folio_order;
}
/**
@@ -783,6 +892,13 @@ static inline bool get_page_unless_zero(struct page *page)
return page_ref_add_unless(page, 1, 0);
}
+static inline struct folio *folio_get_nontail_page(struct page *page)
+{
+ if (unlikely(!get_page_unless_zero(page)))
+ return NULL;
+ return (struct folio *)page;
+}
+
extern int page_is_ram(unsigned long pfn);
enum {
@@ -832,34 +948,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
static inline int folio_entire_mapcount(struct folio *folio)
{
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
- return atomic_read(folio_mapcount_ptr(folio)) + 1;
-}
-
-/*
- * Mapcount of compound page as a whole, does not include mapped sub-pages.
- * Must be called only on head of compound page.
- */
-static inline int head_compound_mapcount(struct page *head)
-{
- return atomic_read(compound_mapcount_ptr(head)) + 1;
-}
-
-/*
- * If a 16GB hugetlb page were mapped by PTEs of all of its 4kB sub-pages,
- * its subpages_mapcount would be 0x400000: choose the COMPOUND_MAPPED bit
- * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
- * leaves subpages_mapcount at 0, but avoid surprise if it participates later.
- */
-#define COMPOUND_MAPPED 0x800000
-#define SUBPAGES_MAPPED (COMPOUND_MAPPED - 1)
-
-/*
- * Number of sub-pages mapped by PTE, does not include compound mapcount.
- * Must be called only on head of compound page.
- */
-static inline int head_subpages_mapcount(struct page *head)
-{
- return atomic_read(subpages_mapcount_ptr(head)) & SUBPAGES_MAPPED;
+ return atomic_read(&folio->_entire_mapcount) + 1;
}
/*
@@ -872,25 +961,29 @@ static inline void page_mapcount_reset(struct page *page)
atomic_set(&(page)->_mapcount, -1);
}
-/*
- * Mapcount of 0-order page; when compound sub-page, includes
- * compound_mapcount of compound_head of page.
+/**
+ * page_mapcount() - Number of times this precise page is mapped.
+ * @page: The page.
+ *
+ * The number of times this page is mapped. If this page is part of
+ * a large folio, it includes the number of times this page is mapped
+ * as part of that folio.
*
- * Result is undefined for pages which cannot be mapped into userspace.
+ * The result is undefined for pages which cannot be mapped into userspace.
* For example SLAB or special types of pages. See function page_has_type().
- * They use this place in struct page differently.
+ * They use this field in struct page differently.
*/
static inline int page_mapcount(struct page *page)
{
int mapcount = atomic_read(&page->_mapcount) + 1;
- if (likely(!PageCompound(page)))
- return mapcount;
- page = compound_head(page);
- return head_compound_mapcount(page) + mapcount;
+ if (unlikely(PageCompound(page)))
+ mapcount += folio_entire_mapcount(page_folio(page));
+
+ return mapcount;
}
-int total_compound_mapcount(struct page *head);
+int folio_total_mapcount(struct folio *folio);
/**
* folio_mapcount() - Calculate the number of mappings of this folio.
@@ -907,24 +1000,24 @@ static inline int folio_mapcount(struct folio *folio)
{
if (likely(!folio_test_large(folio)))
return atomic_read(&folio->_mapcount) + 1;
- return total_compound_mapcount(&folio->page);
+ return folio_total_mapcount(folio);
}
static inline int total_mapcount(struct page *page)
{
if (likely(!PageCompound(page)))
return atomic_read(&page->_mapcount) + 1;
- return total_compound_mapcount(compound_head(page));
+ return folio_total_mapcount(page_folio(page));
}
static inline bool folio_large_is_mapped(struct folio *folio)
{
/*
- * Reading folio_mapcount_ptr() below could be omitted if hugetlb
- * participated in incrementing subpages_mapcount when compound mapped.
+ * Reading _entire_mapcount below could be omitted if hugetlb
+ * participated in incrementing nr_pages_mapped when compound mapped.
*/
- return atomic_read(folio_subpages_mapcount_ptr(folio)) > 0 ||
- atomic_read(folio_mapcount_ptr(folio)) >= 0;
+ return atomic_read(&folio->_nr_pages_mapped) > 0 ||
+ atomic_read(&folio->_entire_mapcount) >= 0;
}
/**
@@ -999,8 +1092,11 @@ extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];
static inline void set_compound_page_dtor(struct page *page,
enum compound_dtor_id compound_dtor)
{
+ struct folio *folio = (struct folio *)page;
+
VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
- page[1].compound_dtor = compound_dtor;
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ folio->_folio_dtor = compound_dtor;
}
static inline void folio_set_compound_dtor(struct folio *folio,
@@ -1012,44 +1108,13 @@ static inline void folio_set_compound_dtor(struct folio *folio,
void destroy_large_folio(struct folio *folio);
-static inline int head_compound_pincount(struct page *head)
-{
- return atomic_read(compound_pincount_ptr(head));
-}
-
static inline void set_compound_order(struct page *page, unsigned int order)
{
- page[1].compound_order = order;
-#ifdef CONFIG_64BIT
- page[1].compound_nr = 1U << order;
-#endif
-}
-
-/*
- * folio_set_compound_order is generally passed a non-zero order to
- * initialize a large folio. However, hugetlb code abuses this by
- * passing in zero when 'dissolving' a large folio.
- */
-static inline void folio_set_compound_order(struct folio *folio,
- unsigned int order)
-{
- VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ struct folio *folio = (struct folio *)page;
folio->_folio_order = order;
#ifdef CONFIG_64BIT
- folio->_folio_nr_pages = order ? 1U << order : 0;
-#endif
-}
-
-/* Returns the number of pages in this potentially compound page. */
-static inline unsigned long compound_nr(struct page *page)
-{
- if (!PageHead(page))
- return 1;
-#ifdef CONFIG_64BIT
- return page[1].compound_nr;
-#else
- return 1UL << compound_order(page);
+ folio->_folio_nr_pages = 1U << order;
#endif
}
@@ -1076,16 +1141,6 @@ static inline unsigned int thp_order(struct page *page)
}
/**
- * thp_nr_pages - The number of regular pages in this huge page.
- * @page: The head page of a huge page.
- */
-static inline int thp_nr_pages(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- return compound_nr(page);
-}
-
-/**
* thp_size - Size of a transparent huge page.
* @page: Head page of a transparent huge page.
*
@@ -1226,8 +1281,6 @@ static inline void get_page(struct page *page)
folio_get(page_folio(page));
}
-int __must_check try_grab_page(struct page *page, unsigned int flags);
-
static inline __must_check bool try_get_page(struct page *page)
{
page = compound_head(page);
@@ -1369,6 +1422,21 @@ static inline bool is_cow_mapping(vm_flags_t flags)
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
+#ifndef CONFIG_MMU
+static inline bool is_nommu_shared_mapping(vm_flags_t flags)
+{
+ /*
+ * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
+ * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
+ * a file mapping. R/O MAP_PRIVATE mappings might still modify
+ * underlying memory if ptrace is active, so this is only possible if
+ * ptrace does not apply. Note that there is no mprotect() to upgrade
+ * write permissions later.
+ */
+ return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
+}
+#endif
+
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif
@@ -1643,11 +1711,6 @@ static inline struct folio *pfn_folio(unsigned long pfn)
return page_folio(pfn_to_page(pfn));
}
-static inline atomic_t *folio_pincount_ptr(struct folio *folio)
-{
- return &folio_page(folio, 1)->compound_pincount;
-}
-
/**
* folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
* @folio: The folio.
@@ -1665,7 +1728,7 @@ static inline atomic_t *folio_pincount_ptr(struct folio *folio)
* expected to be able to deal gracefully with a false positive.
*
* For large folios, the result will be exactly correct. That's because
- * we have more tracking data available: the compound_pincount is used
+ * we have more tracking data available: the _pincount field is used
* instead of the GUP_PIN_COUNTING_BIAS scheme.
*
* For more information, please see Documentation/core-api/pin_user_pages.rst.
@@ -1676,7 +1739,7 @@ static inline atomic_t *folio_pincount_ptr(struct folio *folio)
static inline bool folio_maybe_dma_pinned(struct folio *folio)
{
if (folio_test_large(folio))
- return atomic_read(folio_pincount_ptr(folio)) > 0;
+ return atomic_read(&folio->_pincount) > 0;
/*
* folio_ref_count() is signed. If that refcount overflows, then
@@ -1784,6 +1847,33 @@ static inline long folio_nr_pages(struct folio *folio)
#endif
}
+/*
+ * compound_nr() returns the number of pages in this potentially compound
+ * page. compound_nr() can be called on a tail page, and is defined to
+ * return 1 in that case.
+ */
+static inline unsigned long compound_nr(struct page *page)
+{
+ struct folio *folio = (struct folio *)page;
+
+ if (!test_bit(PG_head, &folio->flags))
+ return 1;
+#ifdef CONFIG_64BIT
+ return folio->_folio_nr_pages;
+#else
+ return 1L << folio->_folio_order;
+#endif
+}
+
+/**
+ * thp_nr_pages - The number of regular pages in this huge page.
+ * @page: The head page of a huge page.
+ */
+static inline int thp_nr_pages(struct page *page)
+{
+ return folio_nr_pages((struct folio *)page);
+}
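
A small consistency sketch (assumption: called on a head page) showing that the rewritten helpers above agree; for an order-9 THP on x86-64 each count is 512:

static void check_huge_page_size(struct page *head)
{
	struct folio *folio = page_folio(head);

	WARN_ON(compound_nr(head) != folio_nr_pages(folio));
	WARN_ON(thp_nr_pages(head) != folio_nr_pages(folio));
	WARN_ON(thp_size(head) != folio_nr_pages(folio) * PAGE_SIZE);
}
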
+
/**
* folio_next - Move to the next physical folio.
* @folio: The folio we're currently operating on.
@@ -1833,6 +1923,24 @@ static inline size_t folio_size(struct folio *folio)
return PAGE_SIZE << folio_order(folio);
}
+/**
+ * folio_estimated_sharers - Estimate the number of sharers of a folio.
+ * @folio: The folio.
+ *
+ * folio_estimated_sharers() aims to serve as a function to efficiently
+ * estimate the number of processes sharing a folio. This is done by
+ * looking at the precise mapcount of the first subpage in the folio, and
+ * assuming the other subpages are the same. This may not be true for large
+ * folios. If you want exact mapcounts for exact calculations, look at
+ * page_mapcount() or folio_total_mapcount().
+ *
+ * Return: The estimated number of processes sharing a folio.
+ */
+static inline int folio_estimated_sharers(struct folio *folio)
+{
+ return page_mapcount(folio_page(folio, 0));
+}
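
A hypothetical caller of the estimator above, accepting the imprecision the comment describes (the helper name is illustrative, not from this patch):

static bool folio_probably_shared(struct folio *folio)
{
	/* more than one estimated sharer: likely mapped by several processes */
	return folio_estimated_sharers(folio) > 1;
}
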
+
#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
static inline int arch_make_page_accessible(struct page *page)
{
@@ -1929,6 +2037,21 @@ static inline bool page_is_pfmemalloc(const struct page *page)
}
/*
+ * Return true only if the folio has been allocated with
+ * ALLOC_NO_WATERMARKS and the low watermark was not
+ * met implying that the system is under some pressure.
+ */
+static inline bool folio_is_pfmemalloc(const struct folio *folio)
+{
+ /*
+ * lru.next has bit 1 set if the page is allocated from the
+ * pfmemalloc reserves. Callers may simply overwrite it if
+ * they do not need to preserve that information.
+ */
+ return (uintptr_t)folio->lru.next & BIT(1);
+}
+
+/*
* Only to be called by the page allocator on a freshly allocated
* page.
*/
@@ -2015,6 +2138,8 @@ static inline bool can_do_mlock(void) { return false; }
extern int user_shm_lock(size_t, struct ucounts *);
extern void user_shm_unlock(size_t, struct ucounts *);
+struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -2022,13 +2147,16 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
- unsigned long size);
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details);
+static inline void zap_vma_pages(struct vm_area_struct *vma)
+{
+ zap_page_range_single(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start, NULL);
+}
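
With zap_page_range() removed above, callers that used to zap a whole VMA by hand are converted to the new helper; a sketch of the conversion (teardown_vma_mappings() is an illustrative name):

static void teardown_vma_mappings(struct vm_area_struct *vma)
{
	/*
	 * Previously: zap_page_range(vma, vma->vm_start,
	 *                            vma->vm_end - vma->vm_start);
	 */
	zap_vma_pages(vma);
}
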
void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
struct vm_area_struct *start_vma, unsigned long start,
- unsigned long end);
+ unsigned long end, bool mm_wr_locked);
struct mmu_notifier_range;
@@ -2175,21 +2303,18 @@ static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma
}
bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
-extern unsigned long change_protection(struct mmu_gather *tlb,
+extern long change_protection(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgprot_t newprot,
- unsigned long cp_flags);
-extern int mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
- struct vm_area_struct **pprev, unsigned long start,
- unsigned long end, unsigned long newflags);
+ unsigned long end, unsigned long cp_flags);
+extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
+ struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags);
/*
* doesn't attempt to fault and will return short.
*/
int get_user_pages_fast_only(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
-int pin_user_pages_fast_only(unsigned long start, int nr_pages,
- unsigned int gup_flags, struct page **pages);
static inline bool get_user_page_fast_only(unsigned long addr,
unsigned int gup_flags, struct page **pagep)
@@ -2813,23 +2938,21 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
- struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
-{
- return __vma_adjust(vma, start, end, pgoff, insert, NULL);
-}
-extern struct vm_area_struct *vma_merge(struct mm_struct *,
- struct vm_area_struct *prev, unsigned long addr, unsigned long end,
- unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *);
+extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff,
+ struct vm_area_struct *next);
+extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff);
+extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
+ struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
+ unsigned long end, unsigned long vm_flags, struct anon_vma *,
+ struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx,
+ struct anon_vma_name *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
-extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
- unsigned long addr, int new_below);
-extern int split_vma(struct mm_struct *, struct vm_area_struct *,
- unsigned long addr, int new_below);
+extern int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *,
+ unsigned long addr, int new_below);
+extern int split_vma(struct vma_iterator *vmi, struct vm_area_struct *,
+ unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
@@ -2837,9 +2960,6 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
-void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas);
-void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas);
-
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
unsigned long start,
@@ -2887,7 +3007,7 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate, struct list_head *uf);
-extern int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
+extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
@@ -2895,6 +3015,9 @@ extern int do_munmap(struct mm_struct *, unsigned long, size_t,
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
+extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct list_head *uf, bool downgrade);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
@@ -3100,81 +3223,6 @@ static inline vm_fault_t vmf_error(int err)
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int foll_flags);
-#define FOLL_WRITE 0x01 /* check pte is writable */
-#define FOLL_TOUCH 0x02 /* mark page accessed */
-#define FOLL_GET 0x04 /* do get_page on page */
-#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
-#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
-#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
- * and return without waiting upon it */
-#define FOLL_NOFAULT 0x80 /* do not fault in pages */
-#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
-#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
-#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
-#define FOLL_ANON 0x8000 /* don't do file mappings */
-#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
-#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
-#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */
-#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */
-#define FOLL_PCI_P2PDMA 0x100000 /* allow returning PCI P2PDMA pages */
-#define FOLL_INTERRUPTIBLE 0x200000 /* allow interrupts from generic signals */
-
-/*
- * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
- * other. Here is what they mean, and how to use them:
- *
- * FOLL_LONGTERM indicates that the page will be held for an indefinite time
- * period _often_ under userspace control. This is in contrast to
- * iov_iter_get_pages(), whose usages are transient.
- *
- * FIXME: For pages which are part of a filesystem, mappings are subject to the
- * lifetime enforced by the filesystem and we need guarantees that longterm
- * users like RDMA and V4L2 only establish mappings which coordinate usage with
- * the filesystem. Ideas for this coordination include revoking the longterm
- * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
- * added after the problem with filesystems was found FS DAX VMAs are
- * specifically failed. Filesystem pages are still subject to bugs and use of
- * FOLL_LONGTERM should be avoided on those pages.
- *
- * FIXME: Also NOTE that FOLL_LONGTERM is not supported in every GUP call.
- * Currently only get_user_pages() and get_user_pages_fast() support this flag
- * and calls to get_user_pages_[un]locked are specifically not allowed. This
- * is due to an incompatibility with the FS DAX check and
- * FAULT_FLAG_ALLOW_RETRY.
- *
- * In the CMA case: long term pins in a CMA region would unnecessarily fragment
- * that region. And so, CMA attempts to migrate the page before pinning, when
- * FOLL_LONGTERM is specified.
- *
- * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
- * but an additional pin counting system) will be invoked. This is intended for
- * anything that gets a page reference and then touches page data (for example,
- * Direct IO). This lets the filesystem know that some non-file-system entity is
- * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
- * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
- * a call to unpin_user_page().
- *
- * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
- * and separate refcounting mechanisms, however, and that means that each has
- * its own acquire and release mechanisms:
- *
- * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
- *
- * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release.
- *
- * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
- * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
- * calls applied to them, and that's perfectly OK. This is a constraint on the
- * callers, not on the pages.)
- *
- * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
- * directly by the caller. That's in order to help avoid mismatches when
- * releasing pages: get_user_pages*() pages must be released via put_page(),
- * while pin_user_pages*() pages must be released via unpin_user_page().
- *
- * Please see Documentation/core-api/pin_user_pages.rst for more information.
- */
-
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
if (vm_fault & VM_FAULT_OOM)
@@ -3187,71 +3235,6 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
}
/*
- * Indicates for which pages that are write-protected in the page table,
- * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
- * GUP pin will remain consistent with the pages mapped into the page tables
- * of the MM.
- *
- * Temporary unmapping of PageAnonExclusive() pages or clearing of
- * PageAnonExclusive() has to protect against concurrent GUP:
- * * Ordinary GUP: Using the PT lock
- * * GUP-fast and fork(): mm->write_protect_seq
- * * GUP-fast and KSM or temporary unmapping (swap, migration): see
- * page_try_share_anon_rmap()
- *
- * Must be called with the (sub)page that's actually referenced via the
- * page table entry, which might not necessarily be the head page for a
- * PTE-mapped THP.
- *
- * If the vma is NULL, we're coming from the GUP-fast path and might have
- * to fallback to the slow path just to lookup the vma.
- */
-static inline bool gup_must_unshare(struct vm_area_struct *vma,
- unsigned int flags, struct page *page)
-{
- /*
- * FOLL_WRITE is implicitly handled correctly as the page table entry
- * has to be writable -- and if it references (part of) an anonymous
- * folio, that part is required to be marked exclusive.
- */
- if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
- return false;
- /*
- * Note: PageAnon(page) is stable until the page is actually getting
- * freed.
- */
- if (!PageAnon(page)) {
- /*
- * We only care about R/O long-term pining: R/O short-term
- * pinning does not have the semantics to observe successive
- * changes through the process page tables.
- */
- if (!(flags & FOLL_LONGTERM))
- return false;
-
- /* We really need the vma ... */
- if (!vma)
- return true;
-
- /*
- * ... because we only care about writable private ("COW")
- * mappings where we have to break COW early.
- */
- return is_cow_mapping(vma->vm_flags);
- }
-
- /* Paired with a memory barrier in page_try_share_anon_rmap(). */
- if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
- smp_rmb();
-
- /*
- * Note that PageKsm() pages cannot be exclusive, and consequently,
- * cannot get pinned.
- */
- return !PageAnonExclusive(page);
-}
-
-/*
* Indicates whether GUP can follow a PROT_NONE mapped page, or whether
* a (NUMA hinting) fault is required.
*/
@@ -3550,6 +3533,11 @@ enum mf_action_page_type {
MF_MSG_UNKNOWN,
};
+/*
+ * Sysfs entries for memory failure handling statistics.
+ */
+extern const struct attribute_group memory_failure_attr_group;
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
unsigned long addr_hint,
@@ -3667,7 +3655,7 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
* VM_MAYWRITE as we still want them to be COW-writable.
*/
if (vma->vm_flags & VM_SHARED)
- vma->vm_flags &= ~(VM_MAYWRITE);
+ vm_flags_clear(vma, VM_MAYWRITE);
}
return 0;
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index ff3f3f23f649..de1e622dd366 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -178,7 +178,7 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
int zone = folio_zonenum(folio);
int delta = folio_nr_pages(folio);
enum lru_list lru = type * LRU_INACTIVE_FILE;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
@@ -224,7 +224,7 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
int gen = folio_lru_gen(folio);
int type = folio_is_file_lru(folio);
int zone = folio_zonenum(folio);
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
@@ -256,9 +256,9 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
lru_gen_update_size(lruvec, folio, -1, gen);
/* for folio_rotate_reclaimable() */
if (reclaiming)
- list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
else
- list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_add(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}
@@ -577,4 +577,15 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
#endif
}
+static inline bool vma_has_recency(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
+ return false;
+
+ if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
+ return false;
+
+ return true;
+}
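
Userspace sketch (an assumption based on the standard madvise()/posix_fadvise() plumbing this series relies on) of how a mapping or its backing file ends up with vma_has_recency() returning false:

#include <fcntl.h>
#include <sys/mman.h>

static void opt_out_of_recency(void *addr, size_t len, int fd)
{
	madvise(addr, len, MADV_RANDOM);		/* sets VM_RAND_READ on the VMA */
	posix_fadvise(fd, 0, 0, POSIX_FADV_NOREUSE);	/* sets FMODE_NOREUSE on the file */
}
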
+
#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index af8119776ab1..0722859c3647 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -140,30 +140,6 @@ struct page {
};
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
-
- /* First tail page only */
- unsigned char compound_dtor;
- unsigned char compound_order;
- atomic_t compound_mapcount;
- atomic_t subpages_mapcount;
- atomic_t compound_pincount;
-#ifdef CONFIG_64BIT
- unsigned int compound_nr; /* 1 << compound_order */
-#endif
- };
- struct { /* Second tail page of transparent huge page */
- unsigned long _compound_pad_1; /* compound_head */
- unsigned long _compound_pad_2;
- /* For both global and memcg */
- struct list_head deferred_list;
- };
- struct { /* Second tail page of hugetlb page */
- unsigned long _hugetlb_pad_1; /* compound_head */
- void *hugetlb_subpool;
- void *hugetlb_cgroup;
- void *hugetlb_cgroup_rsvd;
- void *hugetlb_hwpoison;
- /* No more space on 32-bit: use third tail if more */
};
struct { /* Page table pages */
unsigned long _pt_pad_1; /* compound_head */
@@ -302,20 +278,17 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page)
* @_refcount: Do not access this member directly. Use folio_ref_count()
* to find how many references there are to this folio.
* @memcg_data: Memory Control Group data.
- * @_flags_1: For large folios, additional page flags.
- * @_head_1: Points to the folio. Do not use.
* @_folio_dtor: Which destructor to use for this folio.
* @_folio_order: Do not use directly, call folio_order().
- * @_compound_mapcount: Do not use directly, call folio_entire_mapcount().
- * @_subpages_mapcount: Do not use directly, call folio_mapcount().
+ * @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
+ * @_nr_pages_mapped: Do not use directly, call folio_mapcount().
* @_pincount: Do not use directly, call folio_maybe_dma_pinned().
* @_folio_nr_pages: Do not use directly, call folio_nr_pages().
- * @_flags_2: For alignment. Do not use.
- * @_head_2: Points to the folio. Do not use.
* @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
* @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
* @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
* @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
+ * @_deferred_list: Folios to be split under memory pressure.
*
* A folio is a physically, virtually and logically contiguous set
* of bytes. It is a power-of-two in size, and it is aligned to that
@@ -358,14 +331,16 @@ struct folio {
struct {
unsigned long _flags_1;
unsigned long _head_1;
+ /* public: */
unsigned char _folio_dtor;
unsigned char _folio_order;
- atomic_t _compound_mapcount;
- atomic_t _subpages_mapcount;
+ atomic_t _entire_mapcount;
+ atomic_t _nr_pages_mapped;
atomic_t _pincount;
#ifdef CONFIG_64BIT
unsigned int _folio_nr_pages;
#endif
+ /* private: the union with struct page is transitional */
};
struct page __page_1;
};
@@ -373,10 +348,19 @@ struct folio {
struct {
unsigned long _flags_2;
unsigned long _head_2;
+ /* public: */
void *_hugetlb_subpool;
void *_hugetlb_cgroup;
void *_hugetlb_cgroup_rsvd;
void *_hugetlb_hwpoison;
+ /* private: the union with struct page is transitional */
+ };
+ struct {
+ unsigned long _flags_2a;
+ unsigned long _head_2a;
+ /* public: */
+ struct list_head _deferred_list;
+ /* private: the union with struct page is transitional */
};
struct page __page_2;
};
@@ -401,53 +385,14 @@ FOLIO_MATCH(memcg_data, memcg_data);
offsetof(struct page, pg) + sizeof(struct page))
FOLIO_MATCH(flags, _flags_1);
FOLIO_MATCH(compound_head, _head_1);
-FOLIO_MATCH(compound_dtor, _folio_dtor);
-FOLIO_MATCH(compound_order, _folio_order);
-FOLIO_MATCH(compound_mapcount, _compound_mapcount);
-FOLIO_MATCH(subpages_mapcount, _subpages_mapcount);
-FOLIO_MATCH(compound_pincount, _pincount);
-#ifdef CONFIG_64BIT
-FOLIO_MATCH(compound_nr, _folio_nr_pages);
-#endif
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct folio, fl) == \
offsetof(struct page, pg) + 2 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_2);
FOLIO_MATCH(compound_head, _head_2);
-FOLIO_MATCH(hugetlb_subpool, _hugetlb_subpool);
-FOLIO_MATCH(hugetlb_cgroup, _hugetlb_cgroup);
-FOLIO_MATCH(hugetlb_cgroup_rsvd, _hugetlb_cgroup_rsvd);
-FOLIO_MATCH(hugetlb_hwpoison, _hugetlb_hwpoison);
#undef FOLIO_MATCH
-static inline atomic_t *folio_mapcount_ptr(struct folio *folio)
-{
- struct page *tail = &folio->page + 1;
- return &tail->compound_mapcount;
-}
-
-static inline atomic_t *folio_subpages_mapcount_ptr(struct folio *folio)
-{
- struct page *tail = &folio->page + 1;
- return &tail->subpages_mapcount;
-}
-
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
- return &page[1].compound_mapcount;
-}
-
-static inline atomic_t *subpages_mapcount_ptr(struct page *page)
-{
- return &page[1].subpages_mapcount;
-}
-
-static inline atomic_t *compound_pincount_ptr(struct page *page)
-{
- return &page[1].compound_pincount;
-}
-
/*
* Used for sizing the vmemmap region on some architectures
*/
@@ -546,7 +491,15 @@ struct vm_area_struct {
* See vmf_insert_mixed_prot() for discussion.
*/
pgprot_t vm_page_prot;
- unsigned long vm_flags; /* Flags, see mm.h. */
+
+ /*
+ * Flags, see mm.h.
+ * To modify use vm_flags_{init|reset|set|clear|mod} functions.
+ */
+ union {
+ const vm_flags_t vm_flags;
+ vm_flags_t __private __vm_flags;
+ };
/*
* For areas with an address space and backing store,
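
The union in the hunk above makes vm_flags const for direct writes; a sketch of the vm_flags_{init,set,clear}() wrappers named in its comment (they are defined in mm.h, outside this hunk; the assumption is that the caller holds the mmap write lock where the wrappers require it):

static void vma_flags_example(struct vm_area_struct *vma)
{
	vm_flags_init(vma, VM_READ | VM_MAYREAD);	/* fresh, not yet visible VMA */
	vm_flags_set(vma, VM_LOCKED);			/* OR flags in */
	vm_flags_clear(vma, VM_MAYWRITE);		/* clear flags */
}
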
@@ -658,7 +611,7 @@ struct mm_struct {
raw_spinlock_t cid_lock;
#endif
#ifdef CONFIG_MMU
- atomic_long_t pgtables_bytes; /* PTE page table pages */
+ atomic_long_t pgtables_bytes; /* size of all page tables */
#endif
int map_count; /* number of VMAs */
@@ -915,9 +868,7 @@ struct vma_iterator {
static inline void vma_iter_init(struct vma_iterator *vmi,
struct mm_struct *mm, unsigned long addr)
{
- vmi->mas.tree = &mm->mm_mt;
- vmi->mas.index = addr;
- vmi->mas.node = MAS_START;
+ mas_init(&vmi->mas, &mm->mm_mt, addr);
}
#ifdef CONFIG_SCHED_MM_CID
@@ -1126,4 +1077,87 @@ enum fault_flag {
typedef unsigned int __bitwise zap_flags_t;
+/*
+ * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
+ * other. Here is what they mean, and how to use them:
+ *
+ *
+ * FIXME: For pages which are part of a filesystem, mappings are subject to the
+ * lifetime enforced by the filesystem and we need guarantees that longterm
+ * users like RDMA and V4L2 only establish mappings which coordinate usage with
+ * the filesystem. Ideas for this coordination include revoking the longterm
+ * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
+ * added after the problem with filesystems was found, FS DAX VMAs are
+ * specifically failed. Filesystem pages are still subject to bugs and use of
+ * FOLL_LONGTERM should be avoided on those pages.
+ *
+ * In the CMA case: long term pins in a CMA region would unnecessarily fragment
+ * that region. And so, CMA attempts to migrate the page before pinning, when
+ * FOLL_LONGTERM is specified.
+ *
+ * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
+ * but an additional pin counting system) will be invoked. This is intended for
+ * anything that gets a page reference and then touches page data (for example,
+ * Direct IO). This lets the filesystem know that some non-file-system entity is
+ * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
+ * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
+ * a call to unpin_user_page().
+ *
+ * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
+ * and separate refcounting mechanisms, however, and that means that each has
+ * its own acquire and release mechanisms:
+ *
+ * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
+ *
+ * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages() to release.
+ *
+ * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
+ * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
+ * calls applied to them, and that's perfectly OK. This is a constraint on the
+ * callers, not on the pages.)
+ *
+ * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
+ * directly by the caller. That's in order to help avoid mismatches when
+ * releasing pages: get_user_pages*() pages must be released via put_page(),
+ * while pin_user_pages*() pages must be released via unpin_user_page().
+ *
+ * Please see Documentation/core-api/pin_user_pages.rst for more information.
+ */
+
+enum {
+ /* check pte is writable */
+ FOLL_WRITE = 1 << 0,
+ /* do get_page on page */
+ FOLL_GET = 1 << 1,
+ /* give error on hole if it would be zero */
+ FOLL_DUMP = 1 << 2,
+ /* get_user_pages read/write w/o permission */
+ FOLL_FORCE = 1 << 3,
+ /*
+ * if a disk transfer is needed, start the IO and return without waiting
+ * upon it
+ */
+ FOLL_NOWAIT = 1 << 4,
+ /* do not fault in pages */
+ FOLL_NOFAULT = 1 << 5,
+ /* check page is hwpoisoned */
+ FOLL_HWPOISON = 1 << 6,
+ /* don't do file mappings */
+ FOLL_ANON = 1 << 7,
+ /*
+ * FOLL_LONGTERM indicates that the page will be held for an indefinite
+ * time period _often_ under userspace control. This is in contrast to
+ * iov_iter_get_pages(), whose usages are transient.
+ */
+ FOLL_LONGTERM = 1 << 8,
+ /* split huge pmd before returning */
+ FOLL_SPLIT_PMD = 1 << 9,
+ /* allow returning PCI P2PDMA pages */
+ FOLL_PCI_P2PDMA = 1 << 10,
+ /* allow interrupts from generic signals */
+ FOLL_INTERRUPTIBLE = 1 << 11,
+
+ /* See also internal only FOLL flags in mm/internal.h */
+};
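
A sketch of the FOLL_PIN pairing described above: the pin_user_pages*() APIs set FOLL_PIN internally, so callers pass only the public flags and must release with unpin_user_pages() (the helper name is illustrative):

static int with_pinned_user_buffer(unsigned long user_addr, int nr_pages)
{
	struct page *pages[16];
	int nr;

	if (nr_pages <= 0 || nr_pages > (int)ARRAY_SIZE(pages))
		return -EINVAL;

	nr = pin_user_pages_fast(user_addr, nr_pages,
				 FOLL_WRITE | FOLL_LONGTERM, pages);
	if (nr <= 0)
		return nr ? nr : -EFAULT;

	/* ... hand the pages to DMA or direct IO ... */

	unpin_user_pages(pages, nr);
	return 0;
}
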
+
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 58b3abd457a3..cee1e4b566d8 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -156,4 +156,38 @@ calc_vm_flag_bits(unsigned long flags)
}
unsigned long vm_commit_limit(void);
+
+/*
+ * Denies creating a writable executable mapping or gaining executable permissions.
+ *
+ * This denies the following:
+ *
+ * a) mmap(PROT_WRITE | PROT_EXEC)
+ *
+ * b) mmap(PROT_WRITE)
+ * mprotect(PROT_EXEC)
+ *
+ * c) mmap(PROT_WRITE)
+ * mprotect(PROT_READ)
+ * mprotect(PROT_EXEC)
+ *
+ * But allows the following:
+ *
+ * d) mmap(PROT_READ | PROT_EXEC)
+ * mmap(PROT_READ | PROT_EXEC | PROT_BTI)
+ */
+static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)
+{
+ if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
+ return false;
+
+ if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
+ return true;
+
+ if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
+ return true;
+
+ return false;
+}
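
Userspace sketch of the cases above (an assumption: it relies on the PR_SET_MDWE / PR_MDWE_REFUSE_EXEC_GAIN prctl introduced alongside this helper, with the constants coming from the matching uapi prctl header):

#include <sys/mman.h>
#include <sys/prctl.h>

static void mdwe_demo(void)
{
	prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0);

	/* case a): refused outright */
	void *a = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);	/* MAP_FAILED */

	/* case b): gaining exec later is refused, mprotect() fails with EACCES */
	void *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	mprotect(b, 4096, PROT_READ | PROT_EXEC);

	/* case d): PROT_READ | PROT_EXEC from the start is still allowed */
	void *d = mmap(NULL, 4096, PROT_READ | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	(void)a; (void)b; (void)d;
}
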
+
#endif /* _LINUX_MMAN_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 8fdd3cf971a3..812e6b583b25 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -527,6 +527,7 @@ struct mmc_host {
struct device_node;
struct mmc_host *mmc_alloc_host(int extra, struct device *);
+struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
void mmc_free_host(struct mmc_host *);
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 74f9d9a6d330..0e4ef9c5127a 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -102,6 +102,7 @@
#define SDIO_DEVICE_ID_MARVELL_8977_BT 0x9146
#define SDIO_DEVICE_ID_MARVELL_8987_WLAN 0x9149
#define SDIO_DEVICE_ID_MARVELL_8987_BT 0x914a
+#define SDIO_DEVICE_ID_MARVELL_8978_WLAN 0x9159
#define SDIO_VENDOR_ID_MEDIATEK 0x037a
#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 4ae2f2908f99..5d3d15e97868 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -15,6 +15,7 @@ struct mmc_host;
int mmc_gpio_get_ro(struct mmc_host *host);
int mmc_gpio_get_cd(struct mmc_host *host);
+void mmc_gpio_set_cd_irq(struct mmc_host *host, int irq);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
unsigned int debounce);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index d6c06e140277..64a3e051c3c4 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -269,7 +269,6 @@ extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif
struct mmu_notifier_range {
- struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long start;
unsigned long end;
@@ -514,12 +513,10 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
enum mmu_notifier_event event,
unsigned flags,
- struct vm_area_struct *vma,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
- range->vma = vma;
range->event = event;
range->mm = mm;
range->start = start;
@@ -530,10 +527,10 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
static inline void mmu_notifier_range_init_owner(
struct mmu_notifier_range *range,
enum mmu_notifier_event event, unsigned int flags,
- struct vm_area_struct *vma, struct mm_struct *mm,
- unsigned long start, unsigned long end, void *owner)
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, void *owner)
{
- mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
+ mmu_notifier_range_init(range, event, flags, mm, start, end);
range->owner = owner;
}
@@ -659,9 +656,9 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
range->end = end;
}
-#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
+#define mmu_notifier_range_init(range,event,flags,mm,start,end) \
_mmu_notifier_range_init(range, start, end)
-#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
+#define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
end, owner) \
_mmu_notifier_range_init(range, start, end)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cd28a100d9e4..9fb1b03b83b2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/list_nulls.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
@@ -312,7 +313,7 @@ enum lruvec_flags {
* They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
* offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
* corresponding generation. The gen counter in folio->flags stores gen+1 while
- * a page is on one of lrugen->lists[]. Otherwise it stores 0.
+ * a page is on one of lrugen->folios[]. Otherwise it stores 0.
*
* A page is added to the youngest generation on faulting. The aging needs to
* check the accessed bit at least twice before handing this page over to the
@@ -324,8 +325,8 @@ enum lruvec_flags {
* rest of generations, if they exist, are considered inactive. See
* lru_gen_is_active().
*
- * PG_active is always cleared while a page is on one of lrugen->lists[] so that
- * the aging needs not to worry about it. And it's set again when a page
+ * PG_active is always cleared while a page is on one of lrugen->folios[] so
+ * that the aging needs not to worry about it. And it's set again when a page
* considered active is isolated for non-reclaiming purposes, e.g., migration.
* See lru_gen_add_folio() and lru_gen_del_folio().
*
@@ -404,7 +405,7 @@ enum {
* The number of pages in each generation is eventually consistent and therefore
* can be transiently negative when reset_batch_size() is pending.
*/
-struct lru_gen_struct {
+struct lru_gen_folio {
/* the aging increments the youngest generation number */
unsigned long max_seq;
/* the eviction increments the oldest generation numbers */
@@ -412,7 +413,7 @@ struct lru_gen_struct {
/* the birth time of each generation in jiffies */
unsigned long timestamps[MAX_NR_GENS];
/* the multi-gen LRU lists, lazily sorted on eviction */
- struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* the multi-gen LRU sizes, eventually consistent */
long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* the exponential moving average of refaulted */
@@ -426,6 +427,14 @@ struct lru_gen_struct {
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* whether the multi-gen LRU is enabled */
bool enabled;
+#ifdef CONFIG_MEMCG
+ /* the memcg generation this lru_gen_folio belongs to */
+ u8 gen;
+ /* the list segment this lru_gen_folio belongs to */
+ u8 seg;
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_node list;
+#endif
};
enum {
@@ -461,7 +470,7 @@ struct lru_gen_mm_state {
struct lru_gen_mm_walk {
/* the lruvec under reclaim */
struct lruvec *lruvec;
- /* unstable max_seq from lru_gen_struct */
+ /* unstable max_seq from lru_gen_folio */
unsigned long max_seq;
/* the next address within an mm to scan */
unsigned long next_addr;
@@ -479,12 +488,87 @@ void lru_gen_init_lruvec(struct lruvec *lruvec);
void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
#ifdef CONFIG_MEMCG
+
+/*
+ * For each node, memcgs are divided into two generations: the old and the
+ * young. For each generation, memcgs are randomly sharded into multiple bins
+ * to improve scalability. For each bin, the hlist_nulls is virtually divided
+ * into three segments: the head, the tail and the default.
+ *
+ * An onlining memcg is added to the tail of a random bin in the old generation.
+ * The eviction starts at the head of a random bin in the old generation. The
+ * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes
+ * the old generation, is incremented when all its bins become empty.
+ *
+ * There are four operations:
+ * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
+ * current generation (old or young) and updates its "seg" to "head";
+ * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
+ * current generation (old or young) and updates its "seg" to "tail";
+ * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
+ * generation, updates its "gen" to "old" and resets its "seg" to "default";
+ * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
+ * young generation, updates its "gen" to "young" and resets its "seg" to
+ * "default".
+ *
+ * The events that trigger the above operations are:
+ * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
+ * 2. The first attempt to reclaim a memcg below low, which triggers
+ * MEMCG_LRU_TAIL;
+ * 3. The first attempt to reclaim a memcg below reclaimable size threshold,
+ * which triggers MEMCG_LRU_TAIL;
+ * 4. The second attempt to reclaim a memcg below reclaimable size threshold,
+ * which triggers MEMCG_LRU_YOUNG;
+ * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
+ * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
+ * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
+ *
+ * Note that memcg LRU only applies to global reclaim, and the round-robin
+ * incrementing of their max_seq counters ensures the eventual fairness to all
+ * eligible memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ */
+#define MEMCG_NR_GENS 2
+#define MEMCG_NR_BINS 8
+
+struct lru_gen_memcg {
+ /* the per-node memcg generation counter */
+ unsigned long seq;
+ /* each memcg has one lru_gen_folio per node */
+ unsigned long nr_memcgs[MEMCG_NR_GENS];
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS];
+ /* protects the above */
+ spinlock_t lock;
+};
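
An illustrative helper (an assumption, not part of the patch) showing how the per-node counter described in the comment above selects the old generation's bins:

static struct hlist_nulls_head *
memcg_lru_old_bin(struct lru_gen_memcg *memcg_lru, int bin)
{
	unsigned long old_gen = memcg_lru->seq % MEMCG_NR_GENS;

	return &memcg_lru->fifo[old_gen][bin % MEMCG_NR_BINS];
}
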
+
+void lru_gen_init_pgdat(struct pglist_data *pgdat);
+
void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
-#endif
+void lru_gen_online_memcg(struct mem_cgroup *memcg);
+void lru_gen_offline_memcg(struct mem_cgroup *memcg);
+void lru_gen_release_memcg(struct mem_cgroup *memcg);
+void lru_gen_soft_reclaim(struct lruvec *lruvec);
+
+#else /* !CONFIG_MEMCG */
+
+#define MEMCG_NR_GENS 1
+
+struct lru_gen_memcg {
+};
+
+static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+}
+
+#endif /* CONFIG_MEMCG */
#else /* !CONFIG_LRU_GEN */
+static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+}
+
static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}
@@ -494,6 +578,7 @@ static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
}
#ifdef CONFIG_MEMCG
+
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}
@@ -501,7 +586,24 @@ static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
}
-#endif
+
+static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_soft_reclaim(struct lruvec *lruvec)
+{
+}
+
+#endif /* CONFIG_MEMCG */
#endif /* CONFIG_LRU_GEN */
@@ -524,7 +626,7 @@ struct lruvec {
unsigned long flags;
#ifdef CONFIG_LRU_GEN
/* evictable pages divided into generations */
- struct lru_gen_struct lrugen;
+ struct lru_gen_folio lrugen;
/* to concurrently iterate lru_gen_mm_list */
struct lru_gen_mm_state mm_state;
#endif
@@ -1110,6 +1212,31 @@ struct deferred_split {
};
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Per NUMA node memory failure handling statistics.
+ */
+struct memory_failure_stats {
+ /*
+ * Number of raw pages poisoned.
+ * Cases not accounted: memory outside kernel control, offline page,
+ * arch-specific memory_failure (SGX), hwpoison_filter() filtered
+ * error events, and unpoison actions from hwpoison_unpoison.
+ */
+ unsigned long total;
+ /*
+ * Recovery results of poisoned raw pages handled by memory_failure,
+ * in sync with mf_result.
+ * total = ignored + failed + delayed + recovered.
+ * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted.
+ */
+ unsigned long ignored;
+ unsigned long failed;
+ unsigned long delayed;
+ unsigned long recovered;
+};
+#endif
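
A small sketch (assumption: a debugging-only check, not part of the patch) of the documented invariant total = ignored + failed + delayed + recovered:

static bool mf_stats_consistent(const struct memory_failure_stats *mf_stats)
{
	return mf_stats->total == mf_stats->ignored + mf_stats->failed +
				  mf_stats->delayed + mf_stats->recovered;
}
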
+
/*
* On NUMA machines, each NUMA node would have a pg_data_t to describe
* it's memory layout. On UMA machines there is a single pglist_data which
@@ -1243,6 +1370,8 @@ typedef struct pglist_data {
#ifdef CONFIG_LRU_GEN
/* kswap mm walk data */
struct lru_gen_mm_walk mm_walk;
+ /* lru_gen_folio list */
+ struct lru_gen_memcg memcg_lru;
#endif
CACHELINE_PADDING(_pad2_);
@@ -1253,6 +1382,9 @@ typedef struct pglist_data {
#ifdef CONFIG_NUMA
struct memory_tier __rcu *memtier;
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ struct memory_failure_stats mf_stats;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
diff --git a/include/linux/module.h b/include/linux/module.h
index 8c5909c0076c..4435ad9439ab 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -352,6 +352,14 @@ struct mod_kallsyms {
};
#ifdef CONFIG_LIVEPATCH
+/**
+ * struct klp_modinfo - Elf information preserved from the livepatch module
+ *
+ * @hdr: Elf header
+ * @sechdrs: Section header table
+ * @secstrings: String table for the section headers
+ * @symndx: The symbol table section index
+ */
struct klp_modinfo {
Elf_Ehdr hdr;
Elf_Shdr *sechdrs;
@@ -879,11 +887,13 @@ static inline bool module_sig_ok(struct module *module)
#endif /* CONFIG_MODULE_SIG */
#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS)
-int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *,
struct module *, unsigned long),
void *data);
#else
-static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+static inline int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *,
struct module *, unsigned long),
void *data)
{
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 7b4587a19189..03be088fb439 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -75,6 +75,23 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
unsigned int symindex,
unsigned int relsec,
struct module *mod);
+#ifdef CONFIG_LIVEPATCH
+/*
+ * Some architectures (namely x86_64 and ppc64) perform sanity checks when
+ * applying relocations. If a patched module gets unloaded and then later
+ * reloaded (and re-patched), klp re-applies relocations to the replacement
+ * function(s). Any leftover relocations from the previous loading of the
+ * patched module might trigger the sanity checks.
+ *
+ * To prevent that, when unloading a patched module, clear out any relocations
+ * that might trigger arch-specific sanity checks on a future module reload.
+ */
+void clear_relocate_add(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me);
+#endif
#else
static inline int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 52f452b2259a..1ea326c368f7 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -86,6 +86,7 @@ extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
+extern void mnt_make_shortterm(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index a112b913fff9..13c9b74a4575 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -48,6 +48,10 @@ typedef struct arch_msi_msg_data {
} __attribute__ ((packed)) arch_msi_msg_data_t;
#endif
+#ifndef arch_is_isolated_msi
+#define arch_is_isolated_msi() false
+#endif
+
/**
* msi_msg - Representation of a MSI message
* @address_lo: Low 32 bits of msi message address
@@ -649,6 +653,19 @@ int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int vir
void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
+
+bool msi_device_has_isolated_msi(struct device *dev);
+#else /* CONFIG_GENERIC_MSI_IRQ */
+static inline bool msi_device_has_isolated_msi(struct device *dev)
+{
+ /*
+ * Arguably if the platform does not enable MSI support then it has
+ * "isolated MSI", as an interrupt controller that cannot receive MSIs
+ * is inherently isolated by our definition. The default definition for
+ * arch_is_isolated_msi() is conservative and returns false anyhow.
+ */
+ return arch_is_isolated_msi();
+}
#endif /* CONFIG_GENERIC_MSI_IRQ */
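
A hypothetical consumer of the helper above, in the style of VFIO's allow_unsafe_interrupts override (the function and parameter names are illustrative):

static int check_msi_isolation(struct device *dev, bool allow_unsafe)
{
	if (msi_device_has_isolated_msi(dev))
		return 0;

	/* MSIs from this device could spoof others; require an explicit opt-in */
	return allow_unsafe ? 0 : -EPERM;
}
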
/* PCI specific interfaces */
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index dcf90144d70b..f8d4be9c587a 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -67,6 +67,8 @@ struct gpio_desc;
/* Extended commands for large page devices */
#define NAND_CMD_READSTART 0x30
+#define NAND_CMD_READCACHESEQ 0x31
+#define NAND_CMD_READCACHEEND 0x3f
#define NAND_CMD_RNDOUTSTART 0xE0
#define NAND_CMD_CACHEDPROG 0x15
@@ -1094,10 +1096,20 @@ struct nand_controller_ops {
*
* @lock: lock used to serialize accesses to the NAND controller
* @ops: NAND controller operations.
+ * @supported_op: NAND controller known-to-be-supported operations,
+ * only writable by the core after initial checking.
+ * @supported_op.data_only_read: The controller supports reading more data from
+ * the bus without restarting an entire read operation or
+ * changing the column.
+ * @supported_op.cont_read: The controller supports sequential cache reads.
*/
struct nand_controller {
struct mutex lock;
const struct nand_controller_ops *ops;
+ struct {
+ unsigned int data_only_read: 1;
+ unsigned int cont_read: 1;
+ } supported_op;
};
static inline void nand_controller_init(struct nand_controller *nfc)
@@ -1248,6 +1260,10 @@ struct nand_secure_region {
* @read_retries: The number of read retry modes supported
* @secure_regions: Structure containing the secure regions info
* @nr_secure_regions: Number of secure regions
+ * @cont_read: Sequential page read internals
+ * @cont_read.ongoing: Whether a continuous read is ongoing or not
+ * @cont_read.first_page: Start of the continuous read operation
+ * @cont_read.last_page: End of the continuous read operation
* @controller: The hardware controller structure which is shared among multiple
* independent devices
* @ecc: The ECC controller structure
@@ -1300,6 +1316,11 @@ struct nand_chip {
int read_retries;
struct nand_secure_region *secure_regions;
u8 nr_secure_regions;
+ struct {
+ bool ongoing;
+ unsigned int first_page;
+ unsigned int last_page;
+ } cont_read;
/* Externals */
struct nand_controller *controller;
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 6d3392a7edc6..01be9f0f008a 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -260,6 +260,7 @@ struct spinand_manufacturer {
};
/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
extern const struct spinand_manufacturer ato_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 7d48ea368c5e..a529347fd75b 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -110,6 +110,7 @@ struct ubi_volume_info {
int name_len;
const char *name;
dev_t cdev;
+ struct device *dev;
};
/**
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 0d4531fd46e7..0d797f3367ca 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -77,7 +77,7 @@ struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
struct dentry *base, int len);
extern int follow_down_one(struct path *);
-extern int follow_down(struct path *);
+extern int follow_down(struct path *path, unsigned int flags);
extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e6e02184c25a..6a14b7b11766 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -47,6 +47,7 @@
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
+#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>
@@ -74,6 +75,7 @@ struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
+struct xdp_md;
void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
@@ -1035,14 +1037,14 @@ struct netdev_bpf {
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
- int (*xdo_dev_state_add) (struct xfrm_state *x);
+ int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
void (*xdo_dev_state_delete) (struct xfrm_state *x);
void (*xdo_dev_state_free) (struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
- int (*xdo_dev_policy_add) (struct xfrm_policy *x);
+ int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
};
@@ -1618,6 +1620,11 @@ struct net_device_ops {
bool cycles);
};
+struct xdp_metadata_ops {
+ int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
+ int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash);
+};
+
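
A hypothetical driver sketch wiring up the new ops; the mydrv_* names and the placeholder descriptor access are illustrative, not from this patch:

static int mydrv_xmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	/* a real driver would read the RX timestamp from its descriptor */
	*timestamp = 0;
	return 0;
}

static int mydrv_xmo_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
	/* likewise for the RSS hash */
	*hash = 0;
	return 0;
}

static const struct xdp_metadata_ops mydrv_xdp_metadata_ops = {
	.xmo_rx_timestamp	= mydrv_xmo_rx_timestamp,
	.xmo_rx_hash		= mydrv_xmo_rx_hash,
};

/* in probe(): netdev->xdp_metadata_ops = &mydrv_xdp_metadata_ops; */
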
/**
* enum netdev_priv_flags - &struct net_device priv_flags
*
@@ -1801,6 +1808,7 @@ enum netdev_ml_priv_type {
*
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
+ * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
* @ethtool_ops: Management operations
* @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
@@ -1811,6 +1819,7 @@ enum netdev_ml_priv_type {
* of Layer 2 headers.
*
* @flags: Interface flags (a la BSD)
+ * @xdp_features: XDP capability supported by the device
* @priv_flags: Like 'flags' but invisible to userspace,
* see if.h for the definitions
* @gflags: Global flags ( kept as legacy )
@@ -1957,6 +1966,8 @@ enum netdev_ml_priv_type {
* @gso_max_segs: Maximum number of segments that can be passed to the
* NIC for GSO
* @tso_max_segs: Device (as in HW) limit on the max TSO segment count
+ * @gso_ipv4_max_size: Maximum size of generic segmentation offload,
+ * for IPv4.
*
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
@@ -1997,6 +2008,8 @@ enum netdev_ml_priv_type {
* keep a list of interfaces to be deleted.
* @gro_max_size: Maximum size of aggregated packet in generic
* receive offload (GRO)
+ * @gro_ipv4_max_size: Maximum size of aggregated packet in generic
+ * receive offload (GRO), for IPv4.
*
* @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
* @linkwatch_dev_tracker: refcount tracker used by linkwatch.
@@ -2048,8 +2061,10 @@ struct net_device {
/* Read-mostly cache-line for fast-path access */
unsigned int flags;
+ xdp_features_t xdp_features;
unsigned long long priv_flags;
const struct net_device_ops *netdev_ops;
+ const struct xdp_metadata_ops *xdp_metadata_ops;
int ifindex;
unsigned short gflags;
unsigned short hard_header_len;
@@ -2199,6 +2214,7 @@ struct net_device {
*/
#define GRO_MAX_SIZE (8 * 65535u)
unsigned int gro_max_size;
+ unsigned int gro_ipv4_max_size;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
@@ -2322,6 +2338,7 @@ struct net_device {
u16 gso_max_segs;
#define TSO_MAX_SEGS U16_MAX
u16 tso_max_segs;
+ unsigned int gso_ipv4_max_size;
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
@@ -2831,6 +2848,7 @@ enum netdev_cmd {
NETDEV_OFFLOAD_XSTATS_DISABLE,
NETDEV_OFFLOAD_XSTATS_REPORT_USED,
NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
+ NETDEV_XDP_FEAT_CHANGE,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index d8817d381c14..c8e03bcaecaa 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -437,11 +437,13 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#include <linux/netfilter/nf_conntrack_zones_common.h>
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_set_closing(struct nf_conntrack *nfct);
struct nf_conntrack_tuple;
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {}
struct nf_conntrack_tuple;
static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb)
@@ -459,6 +461,7 @@ struct nf_ct_hook {
bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
const struct sk_buff *);
void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
+ void (*set_closing)(struct nf_conntrack *nfct);
};
extern const struct nf_ct_hook __rcu *nf_ct_hook;
@@ -488,4 +491,9 @@ extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;
*/
DECLARE_PER_CPU(bool, nf_skb_duplicated);
+/**
+ * Contains bitmask of ctnetlink event subscribers, if any.
+ * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
+ */
+extern u8 nf_ctnetlink_has_listener;
#endif /*__LINUX_NETFILTER_H*/
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 4c76ddfb6a67..a1f3522daa69 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -17,6 +17,7 @@
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/uio.h>
enum netfs_sreq_ref_trace;
@@ -296,6 +297,13 @@ void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
bool was_async, enum netfs_sreq_ref_trace what);
void netfs_stats_show(struct seq_file *);
+ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
+ struct iov_iter *new,
+ iov_iter_extraction_t extraction_flags);
+struct sg_table;
+ssize_t netfs_extract_iter_to_sg(struct iov_iter *iter, size_t len,
+ struct sg_table *sgtable, unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags);
/**
* netfs_inode - Get the netfs inode context from the inode
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index d81bde5a5844..c43ac7690eca 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -130,6 +130,16 @@ struct netlink_ext_ack {
#define NL_SET_ERR_MSG_FMT_MOD(extack, fmt, args...) \
NL_SET_ERR_MSG_FMT((extack), KBUILD_MODNAME ": " fmt, ##args)
+#define NL_SET_ERR_MSG_WEAK(extack, msg) do { \
+ if ((extack) && !(extack)->_msg) \
+ NL_SET_ERR_MSG((extack), msg); \
+} while (0)
+
+#define NL_SET_ERR_MSG_WEAK_MOD(extack, msg) do { \
+ if ((extack) && !(extack)->_msg) \
+ NL_SET_ERR_MSG_MOD((extack), msg); \
+} while (0)
+
#define NL_SET_BAD_ATTR_POLICY(extack, attr, pol) do { \
if ((extack)) { \
(extack)->bad_attr = (attr); \
@@ -263,6 +273,10 @@ struct netlink_callback {
};
};
+#define NL_ASSERT_DUMP_CTX_FITS(type_name) \
+ BUILD_BUG_ON(sizeof(type_name) > \
+ sizeof_field(struct netlink_callback, ctx))
+
struct netlink_notify {
struct net *net;
u32 portid;
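
The _WEAK variants above only install a message when the extack is still empty, so a caller can attach a generic fallback without clobbering a more specific message set deeper in the call chain. A minimal sketch, with foo_check_attr() assumed:

static int foo_validate(struct nlattr *attr, struct netlink_ext_ack *extack)
{
	int err = foo_check_attr(attr, extack);	/* may already set extack->_msg */

	if (err)
		/* Only used if no more specific message was set above. */
		NL_SET_ERR_MSG_WEAK(extack, "attribute rejected");
	return err;
}
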
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index d6c119e31d7a..bf89fe6fc3ba 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -569,8 +569,9 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
extern int nfs_congestion_kb;
extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
-extern int nfs_flush_incompatible(struct file *file, struct page *page);
-extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
+extern int nfs_flush_incompatible(struct file *file, struct folio *folio);
+extern int nfs_update_folio(struct file *file, struct folio *folio,
+ unsigned int offset, unsigned int count);
/*
* Try to write back everything synchronously (but check the
@@ -578,7 +579,7 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
*/
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_page(struct inode *inode, struct page *page);
+extern int nfs_wb_folio(struct inode *inode, struct folio *folio);
int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_commit_data *nfs_commitdata_alloc(void);
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index ba7e2e4b0926..a2f1ca657623 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -25,6 +25,7 @@
enum {
PG_BUSY = 0, /* nfs_{un}lock_request */
PG_MAPPED, /* page private set for buffered io */
+ PG_FOLIO, /* Tracking a folio (unset for O_DIRECT) */
PG_CLEAN, /* write succeeded */
PG_COMMIT_TO_DS, /* used by pnfs layouts */
PG_INODE_REF, /* extra ref held by inode when in writeback */
@@ -41,7 +42,10 @@ enum {
struct nfs_inode;
struct nfs_page {
struct list_head wb_list; /* Defines state of page: */
- struct page *wb_page; /* page to read in/write out */
+ union {
+ struct page *wb_page; /* page to read in/write out */
+ struct folio *wb_folio;
+ };
struct nfs_lock_context *wb_lock_context; /* lock context info */
pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
unsigned int wb_offset, /* Offset & ~PAGE_MASK */
@@ -117,10 +121,15 @@ struct nfs_pageio_descriptor {
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
-extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
- struct page *page,
- unsigned int offset,
- unsigned int count);
+extern struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
+ struct page *page,
+ unsigned int pgbase,
+ loff_t offset,
+ unsigned int count);
+extern struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
+ struct folio *folio,
+ unsigned int offset,
+ unsigned int count);
extern void nfs_release_request(struct nfs_page *);
@@ -153,6 +162,66 @@ extern int nfs_page_set_headlock(struct nfs_page *req);
extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
+/**
+ * nfs_page_to_folio - Retrieve a struct folio for the request
+ * @req: pointer to a struct nfs_page
+ *
+ * If a folio was assigned to @req, then return it, otherwise return NULL.
+ */
+static inline struct folio *nfs_page_to_folio(const struct nfs_page *req)
+{
+ if (test_bit(PG_FOLIO, &req->wb_flags))
+ return req->wb_folio;
+ return NULL;
+}
+
+/**
+ * nfs_page_to_page - Retrieve a struct page for the request
+ * @req: pointer to a struct nfs_page
+ * @pgbase: folio byte offset
+ *
+ * Return the page containing the byte that is at offset @pgbase relative
+ * to the start of the folio.
+ * Note: The request starts at offset @req->wb_pgbase.
+ */
+static inline struct page *nfs_page_to_page(const struct nfs_page *req,
+ size_t pgbase)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return req->wb_page;
+ return folio_page(folio, pgbase >> PAGE_SHIFT);
+}
+
+/**
+ * nfs_page_to_inode - Retrieve an inode for the request
+ * @req: pointer to a struct nfs_page
+ */
+static inline struct inode *nfs_page_to_inode(const struct nfs_page *req)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return page_file_mapping(req->wb_page)->host;
+ return folio_file_mapping(folio)->host;
+}
+
+/**
+ * nfs_page_max_length - Retrieve the maximum possible length for a request
+ * @req: pointer to a struct nfs_page
+ *
+ * Returns the maximum possible length of a request
+ */
+static inline size_t nfs_page_max_length(const struct nfs_page *req)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return PAGE_SIZE;
+ return folio_size(folio);
+}
+
/*
* Lock the page of an asynchronous request
*/
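
The accessors added above let common code handle both folio-backed requests and bare-page (O_DIRECT) requests without poking at the union directly. A small illustrative helper, not taken from the patch:

static void foo_trace_request(const struct nfs_page *req)
{
	struct inode *inode = nfs_page_to_inode(req);

	pr_debug("nfs req on inode %lu: pgbase %u, max length %zu%s\n",
		 inode->i_ino, req->wb_pgbase, nfs_page_max_length(req),
		 nfs_page_to_folio(req) ? " (folio-backed)" : " (page-backed)");
}
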
diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
index 75843c00f326..22265b1ff080 100644
--- a/include/linux/nfs_ssc.h
+++ b/include/linux/nfs_ssc.h
@@ -53,6 +53,7 @@ static inline void nfs42_ssc_close(struct file *filep)
if (nfs_ssc_client_tbl.ssc_nfs4_ops)
(*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
}
+#endif
struct nfsd4_ssc_umount_item {
struct list_head nsui_list;
@@ -66,7 +67,6 @@ struct nfsd4_ssc_umount_item {
struct vfsmount *nsui_vfsmount;
char nsui_ipaddr[RPC_MAX_ADDRBUFLEN + 1];
};
-#endif
/*
* NFS_FS
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index f700ff2df074..048c0b9aa623 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -214,4 +214,12 @@ int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
#include <asm/nmi.h>
#endif
+#ifdef CONFIG_NMI_CHECK_CPU
+void nmi_backtrace_stall_snap(const struct cpumask *btp);
+void nmi_backtrace_stall_check(const struct cpumask *btp);
+#else
+static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {}
+static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {}
+#endif
+
#endif
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 980f9c9ac0bc..1f62f7ba71ca 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -18,15 +18,7 @@ struct device_node;
/* consumer cookie */
struct nvmem_cell;
struct nvmem_device;
-
-struct nvmem_cell_info {
- const char *name;
- unsigned int offset;
- unsigned int bytes;
- unsigned int bit_offset;
- unsigned int nbits;
- struct device_node *np;
-};
+struct nvmem_cell_info;
/**
* struct nvmem_cell_lookup - cell lookup entry
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index bb15c9234e21..0262b86194eb 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -14,14 +14,13 @@
#include <linux/gpio/consumer.h>
struct nvmem_device;
-struct nvmem_cell_info;
typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
/* used for vendor specific post processing of cell data */
-typedef int (*nvmem_cell_post_process_t)(void *priv, const char *id, unsigned int offset,
- void *buf, size_t bytes);
+typedef int (*nvmem_cell_post_process_t)(void *priv, const char *id, int index,
+ unsigned int offset, void *buf, size_t bytes);
enum nvmem_type {
NVMEM_TYPE_UNKNOWN = 0,
@@ -48,6 +47,24 @@ struct nvmem_keepout {
};
/**
+ * struct nvmem_cell_info - NVMEM cell description
+ * @name: Name.
+ * @offset: Offset within the NVMEM device.
+ * @bytes: Length of the cell.
+ * @bit_offset: Bit offset if cell is smaller than a byte.
+ * @nbits: Number of bits.
+ * @np: Optional device_node pointer.
+ */
+struct nvmem_cell_info {
+ const char *name;
+ unsigned int offset;
+ unsigned int bytes;
+ unsigned int bit_offset;
+ unsigned int nbits;
+ struct device_node *np;
+};
+
+/**
* struct nvmem_config - NVMEM device configuration
*
* @dev: Parent device.
@@ -136,6 +153,9 @@ struct nvmem_device *devm_nvmem_register(struct device *dev,
void nvmem_add_cell_table(struct nvmem_cell_table *table);
void nvmem_del_cell_table(struct nvmem_cell_table *table);
+int nvmem_add_one_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info);
+
#else
static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
@@ -153,6 +173,11 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
+static inline int nvmem_add_one_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_NVMEM */
#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
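
nvmem_add_one_cell() lets a provider register a single cell discovered at runtime instead of going through a whole nvmem_cell_table. A hedged sketch; the cell name and offset are illustrative only:

static int foo_register_mac_cell(struct nvmem_device *nvmem)
{
	struct nvmem_cell_info info = {
		.name	= "mac-address",
		.offset	= 0x40,	/* assumed location in the device */
		.bytes	= 6,	/* ETH_ALEN */
	};

	return nvmem_add_one_cell(nvmem, &info);
}
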
diff --git a/include/linux/of.h b/include/linux/of.h
index 8b9f94386dc3..0af611307db2 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -100,8 +100,19 @@ struct of_reconfig_data {
struct property *old_prop;
};
+/**
+ * of_node_init - initialize a devicetree node
+ * @node: Pointer to device node that has been created by kzalloc()
+ *
+ * On return the device_node refcount is set to one. Use of_node_put()
+ * on @node when done to free the memory allocated for it. If the node
+ * is NOT a dynamic node the memory will not be freed. The decision of
+ * whether to free the memory will be done by node->release(), which is
+ * of_node_release().
+ */
/* initialize a node */
-extern struct kobj_type of_node_ktype;
+extern const struct kobj_type of_node_ktype;
extern const struct fwnode_operations of_fwnode_ops;
static inline void of_node_init(struct device_node *node)
{
@@ -1009,6 +1020,31 @@ static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
}
/**
+ * of_parse_phandle_with_optional_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * Same as of_parse_phandle_with_args() except that if the cells_name property
+ * is not found, cell_count of 0 is assumed.
+ *
+ * This is useful if you have a phandle which didn't have arguments
+ * before and thus doesn't have a '#*-cells' property but is now migrated to
+ * having arguments while retaining backwards compatibility.
+ */
+static inline int of_parse_phandle_with_optional_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return __of_parse_phandle_with_args(np, list_name, cells_name,
+ 0, index, out_args);
+}
+
+/**
* of_property_count_u8_elems - Count the number of u8 elements in a property
*
* @np: device node from which the property value is to be read.
@@ -1155,7 +1191,8 @@ static inline int of_property_read_string_index(const struct device_node *np,
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
*
- * Search for a property in a device node.
+ * Search for a boolean property in a device node. Usage on non-boolean
+ * property types is deprecated.
*
 * Return: true if the property exists, false otherwise.
*/
@@ -1168,6 +1205,20 @@ static inline bool of_property_read_bool(const struct device_node *np,
}
/**
+ * of_property_present - Test if a property is present in a node
+ * @np: device node to search for the property.
+ * @propname: name of the property to be searched.
+ *
+ * Test for a property present in a device node.
+ *
+ * Return: true if the property exists, false otherwise.
+ */
+static inline bool of_property_present(const struct device_node *np, const char *propname)
+{
+ return of_property_read_bool(np, propname);
+}
+
+/**
* of_property_read_u8_array - Find and read an array of u8 from a property.
*
* @np: device node from which the property value is to be read.
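
of_property_present() is meant for plain existence checks (keeping of_property_read_bool() for genuinely boolean properties), and of_parse_phandle_with_optional_args() covers phandle lists whose '#*-cells' property may be absent. A brief usage sketch; the property names are illustrative:

static void foo_parse(struct device_node *np)
{
	struct of_phandle_args args;

	/* Boolean property: read_bool() is still the right call. */
	if (of_property_read_bool(np, "wakeup-source"))
		pr_info("%pOF: wakeup capable\n", np);

	/* Pure existence test: use the new helper. */
	if (of_property_present(np, "clocks"))
		pr_info("%pOF: clocks provided\n", np);

	/* Phandle that may or may not carry argument cells. */
	if (!of_parse_phandle_with_optional_args(np, "resets", "#reset-cells",
						 0, &args))
		of_node_put(args.np);
}
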
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 265f26eeaf6b..376671594746 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -38,6 +38,8 @@ struct of_pci_range {
/* Translate a DMA address from device space to CPU space */
extern u64 of_translate_dma_address(struct device_node *dev,
const __be32 *in_addr);
+extern const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *addr,
+ phys_addr_t *start, size_t *length);
#ifdef CONFIG_OF_ADDRESS
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index ab7d557d541d..f4b57614979d 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -36,7 +36,7 @@ extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len);
extern int of_device_request_module(struct device *dev);
extern void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
-extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env);
+extern int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env);
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
@@ -83,7 +83,7 @@ static inline int of_device_request_module(struct device *dev)
return -ENODEV;
}
-static inline int of_device_uevent_modalias(struct device *dev,
+static inline int of_device_uevent_modalias(const struct device *dev,
struct kobj_uevent_env *env)
{
return -ENODEV;
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 6db627257a7b..5d58b3b0a97e 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -17,21 +17,6 @@
struct device_node;
-/*
- * This is Linux-specific flags. By default controllers' and Linux' mapping
- * match, but GPIO controllers are free to translate their own flags to
- * Linux-specific in their .xlate callback. Though, 1:1 mapping is recommended.
- */
-enum of_gpio_flags {
- OF_GPIO_ACTIVE_LOW = 0x1,
- OF_GPIO_SINGLE_ENDED = 0x2,
- OF_GPIO_OPEN_DRAIN = 0x4,
- OF_GPIO_TRANSITORY = 0x8,
- OF_GPIO_PULL_UP = 0x10,
- OF_GPIO_PULL_DOWN = 0x20,
- OF_GPIO_PULL_DISABLE = 0x40,
-};
-
#ifdef CONFIG_OF_GPIO
#include <linux/container_of.h>
@@ -50,17 +35,12 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
return container_of(gc, struct of_mm_gpio_chip, gc);
}
-extern int of_get_named_gpio_flags(const struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags);
+extern int of_get_named_gpio(const struct device_node *np,
+ const char *list_name, int index);
extern int of_mm_gpiochip_add_data(struct device_node *np,
struct of_mm_gpio_chip *mm_gc,
void *data);
-static inline int of_mm_gpiochip_add(struct device_node *np,
- struct of_mm_gpio_chip *mm_gc)
-{
- return of_mm_gpiochip_add_data(np, mm_gc, NULL);
-}
extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
#else /* CONFIG_OF_GPIO */
@@ -68,86 +48,12 @@ extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
#include <linux/errno.h>
/* Drivers may not strictly depend on the GPIO support, so let them link. */
-static inline int of_get_named_gpio_flags(const struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags)
-{
- if (flags)
- *flags = 0;
-
- return -ENOSYS;
-}
-
-#endif /* CONFIG_OF_GPIO */
-
-/**
- * of_gpio_named_count() - Count GPIOs for a device
- * @np: device node to count GPIOs for
- * @propname: property name containing gpio specifier(s)
- *
- * The function returns the count of GPIOs specified for a node.
- * Note that the empty GPIO specifiers count too. Returns either
- * Number of gpios defined in property,
- * -EINVAL for an incorrectly formed gpios property, or
- * -ENOENT for a missing gpios property
- *
- * Example:
- * gpios = <0
- * &gpio1 1 2
- * 0
- * &gpio2 3 4>;
- *
- * The above example defines four GPIOs, two of which are not specified.
- * This function will return '4'
- */
-static inline int of_gpio_named_count(const struct device_node *np,
- const char *propname)
-{
- return of_count_phandle_with_args(np, propname, "#gpio-cells");
-}
-
-/**
- * of_gpio_count() - Count GPIOs for a device
- * @np: device node to count GPIOs for
- *
- * Same as of_gpio_named_count, but hard coded to use the 'gpios' property
- */
-static inline int of_gpio_count(const struct device_node *np)
-{
- return of_gpio_named_count(np, "gpios");
-}
-
-static inline int of_get_gpio_flags(const struct device_node *np, int index,
- enum of_gpio_flags *flags)
-{
- return of_get_named_gpio_flags(np, "gpios", index, flags);
-}
-
-/**
- * of_get_named_gpio() - Get a GPIO number to use with GPIO API
- * @np: device node to get GPIO from
- * @propname: Name of property containing gpio specifier(s)
- * @index: index of the GPIO
- *
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
- * value on the error condition.
- */
static inline int of_get_named_gpio(const struct device_node *np,
const char *propname, int index)
{
- return of_get_named_gpio_flags(np, propname, index, NULL);
+ return -ENOSYS;
}
-/**
- * of_get_gpio() - Get a GPIO number to use with GPIO API
- * @np: device node to get GPIO from
- * @index: index of the GPIO
- *
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
- * value on the error condition.
- */
-static inline int of_get_gpio(const struct device_node *np, int index)
-{
- return of_get_gpio_flags(np, index, NULL);
-}
+#endif /* CONFIG_OF_GPIO */
#endif /* __LINUX_OF_GPIO_H */
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 55c1eb300a86..9a5e6b410dd2 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -12,6 +12,9 @@ extern const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np,
const u32 *id);
+extern void of_iommu_get_resv_regions(struct device *dev,
+ struct list_head *list);
+
#else
static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
@@ -21,6 +24,11 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
return NULL;
}
+static inline void of_iommu_get_resv_regions(struct device *dev,
+ struct list_head *list)
+{
+}
+
#endif /* CONFIG_OF_IOMMU */
#endif /* __OF_IOMMU_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 69e93a0c1277..a7e3a3405520 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -531,6 +531,7 @@ PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
* available at this point.
*/
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
+#define folio_test_highmem(__f) is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 22be4582faae..bc2e39090a1f 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -7,15 +7,35 @@
#include <linux/stackdepot.h>
struct pglist_data;
+
+/**
+ * struct page_ext_operations - per page_ext client operations
+ * @offset: Offset to the client's data within page_ext. Offset is returned to
+ * the client by page_ext_init.
+ * @size: The size of the client data within page_ext.
+ * @need: Function that returns true if client requires page_ext.
+ * @init: (optional) Called to initialize client once page_exts are allocated.
+ * @need_shared_flags: True when client is using shared page_ext->flags
+ * field.
+ *
+ * Each Page Extension client must define page_ext_operations in
+ * page_ext_ops array.
+ */
struct page_ext_operations {
size_t offset;
size_t size;
bool (*need)(void);
void (*init)(void);
+ bool need_shared_flags;
};
+extern bool deferred_struct_pages;
+
#ifdef CONFIG_PAGE_EXTENSION
+/*
+ * The page_ext_flags users must set need_shared_flags to true.
+ */
enum page_ext_flags {
PAGE_EXT_OWNER,
PAGE_EXT_OWNER_ALLOCATED,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 29e1f9e76eb6..0acb8e1fb7af 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -547,6 +547,26 @@ static inline struct folio *filemap_lock_folio(struct address_space *mapping,
}
/**
+ * filemap_grab_folio - grab a folio from the page cache
+ * @mapping: The address space to search
+ * @index: The page index
+ *
+ * Looks up the page cache entry at @mapping & @index. If no folio is found,
+ * a new folio is created. The folio is locked, marked as accessed, and
+ * returned.
+ *
+ * Return: A found or created folio, or NULL if no folio is found and one
+ * cannot be created.
+ */
+static inline struct folio *filemap_grab_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping_gfp_mask(mapping));
+}
+
+/**
* find_get_page - find and get a page reference
* @mapping: the address_space to search
* @offset: the page index
@@ -719,16 +739,8 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
-unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
- pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
- struct page **pages);
-static inline unsigned find_get_pages_tag(struct address_space *mapping,
- pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
- struct page **pages)
-{
- return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
- nr_pages, pages);
-}
+unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
struct page *grab_cache_page_write_begin(struct address_space *mapping,
pgoff_t index);
@@ -744,6 +756,8 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
struct folio *read_cache_folio(struct address_space *, pgoff_t index,
filler_t *filler, struct file *file);
+struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
+ gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
@@ -978,16 +992,6 @@ static inline int folio_lock_killable(struct folio *folio)
}
/*
- * lock_page_killable is like lock_page but can be interrupted by fatal
- * signals. It returns 0 if it locked the page and -EINTR if it was
- * killed while waiting.
- */
-static inline int lock_page_killable(struct page *page)
-{
- return folio_lock_killable(page_folio(page));
-}
-
-/*
* folio_lock_or_retry - Lock the folio, unless this would block and the
* caller indicated that it can handle a retry.
*
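
filemap_get_folios_tag() replaces find_get_pages_range_tag() with a folio_batch based interface. A sketch of the typical writeback-style loop:

static void foo_scan_dirty(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i, nr;

	folio_batch_init(&fbatch);
	while ((nr = filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
					    PAGECACHE_TAG_DIRTY, &fbatch))) {
		for (i = 0; i < nr; i++)
			pr_debug("dirty folio at index %lu\n",
				 fbatch.folios[i]->index);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
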
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 215eb6c3bdc9..f582f7213ea5 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -26,14 +26,6 @@ struct pagevec {
};
void __pagevec_release(struct pagevec *pvec);
-unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, pgoff_t end,
- xa_mark_t tag);
-static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, xa_mark_t tag)
-{
- return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
-}
static inline void pagevec_init(struct pagevec *pvec)
{
@@ -103,6 +95,11 @@ static inline void folio_batch_init(struct folio_batch *fbatch)
fbatch->percpu_pvec_drained = false;
}
+static inline void folio_batch_reinit(struct folio_batch *fbatch)
+{
+ fbatch->nr = 0;
+}
+
static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
{
return fbatch->nr;
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 959f52e5867d..27a6df448ee5 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -21,7 +21,16 @@ struct mm_walk;
* depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
* Any folded depths (where PTRS_PER_P?D is equal to 1)
* are skipped.
- * @hugetlb_entry: if set, called for each hugetlb entry
+ * @hugetlb_entry: if set, called for each hugetlb entry. This hook
+ * function is called with the vma lock held, in order to
+ * protect against a concurrent freeing of the pte_t* or
+ * the ptl. In some cases, the hook function needs to drop
+ * and retake the vma lock in order to avoid deadlocks
+ * while calling other functions. In such cases the hook
+ * function must either refrain from accessing the pte or
+ * ptl after dropping the vma lock, or else revalidate
+ * those items after re-acquiring the vma lock and before
+ * accessing them.
* @test_walk: caller specific callback function to determine whether
* we walk over the current vma or not. Returning 0 means
* "do page table walk over the current vma", returning
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index 3d6fc576d6a1..f1ec5c10c3b3 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -26,6 +26,9 @@ struct parport_pc_private {
/* Whether or not there's an ECR. */
int ecr;
+ /* Bitmask of writable ECR bits. */
+ unsigned char ecr_writable;
+
/* Number of PWords that FIFO will hold. */
int fifo_depth;
diff --git a/include/linux/pata_parport.h b/include/linux/pata_parport.h
new file mode 100644
index 000000000000..58781846f282
--- /dev/null
+++ b/include/linux/pata_parport.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * pata_parport.h (c) 1997-8 Grant R. Guenther <grant@torque.net>
+ * Under the terms of the GPL.
+ *
+ * This file defines the interface for parallel port IDE adapter chip drivers.
+ */
+
+#ifndef LINUX_PATA_PARPORT_H
+#define LINUX_PATA_PARPORT_H
+
+#include <linux/libata.h>
+
+#define PI_PCD 1 /* dummy for paride protocol modules */
+
+struct pi_adapter {
+ struct device dev;
+ struct pi_protocol *proto; /* adapter protocol */
+ int port; /* base address of parallel port */
+ int mode; /* transfer mode in use */
+ int delay; /* adapter delay setting */
+ int devtype; /* dummy for paride protocol modules */
+ char *device; /* dummy for paride protocol modules */
+ int unit; /* unit number for chained adapters */
+ int saved_r0; /* saved port state */
+ int saved_r2; /* saved port state */
+ unsigned long private; /* for protocol module */
+ struct pardevice *pardev; /* pointer to pardevice */
+};
+
+typedef struct pi_adapter PIA; /* for paride protocol modules */
+
+/* registers are addressed as (cont,regr)
+ * cont: 0 for command register file, 1 for control register(s)
+ * regr: 0-7 for register number.
+ */
+
+/* macros and functions exported to the protocol modules */
+#define delay_p (pi->delay ? udelay(pi->delay) : (void)0)
+#define out_p(offs, byte) do { outb(byte, pi->port + offs); delay_p; } while (0)
+#define in_p(offs) (delay_p, inb(pi->port + offs))
+
+#define w0(byte) out_p(0, byte)
+#define r0() in_p(0)
+#define w1(byte) out_p(1, byte)
+#define r1() in_p(1)
+#define w2(byte) out_p(2, byte)
+#define r2() in_p(2)
+#define w3(byte) out_p(3, byte)
+#define w4(byte) out_p(4, byte)
+#define r4() in_p(4)
+#define w4w(data) do { outw(data, pi->port + 4); delay_p; } while (0)
+#define w4l(data) do { outl(data, pi->port + 4); delay_p; } while (0)
+#define r4w() (delay_p, inw(pi->port + 4))
+#define r4l() (delay_p, inl(pi->port + 4))
+
+static inline u16 pi_swab16(char *b, int k)
+{
+ union { u16 u; char t[2]; } r;
+
+ r.t[0] = b[2 * k + 1]; r.t[1] = b[2 * k];
+ return r.u;
+}
+
+static inline u32 pi_swab32(char *b, int k)
+{
+ union { u32 u; char f[4]; } r;
+
+ r.f[0] = b[4 * k + 1]; r.f[1] = b[4 * k];
+ r.f[2] = b[4 * k + 3]; r.f[3] = b[4 * k + 2];
+ return r.u;
+}
+
+struct pi_protocol {
+ char name[8];
+
+ int max_mode;
+ int epp_first; /* modes >= this use 8 ports */
+
+ int default_delay;
+ int max_units; /* max chained units probed for */
+
+ void (*write_regr)(struct pi_adapter *pi, int cont, int regr, int val);
+ int (*read_regr)(struct pi_adapter *pi, int cont, int regr);
+ void (*write_block)(struct pi_adapter *pi, char *buf, int count);
+ void (*read_block)(struct pi_adapter *pi, char *buf, int count);
+
+ void (*connect)(struct pi_adapter *pi);
+ void (*disconnect)(struct pi_adapter *pi);
+
+ int (*test_port)(struct pi_adapter *pi);
+ int (*probe_unit)(struct pi_adapter *pi);
+ int (*test_proto)(struct pi_adapter *pi, char *scratch, int verbose);
+ void (*log_adapter)(struct pi_adapter *pi, char *scratch, int verbose);
+
+ int (*init_proto)(struct pi_adapter *pi);
+ void (*release_proto)(struct pi_adapter *pi);
+ struct module *owner;
+ struct device_driver driver;
+ struct scsi_host_template sht;
+};
+
+#define PATA_PARPORT_SHT ATA_PIO_SHT
+
+int pata_parport_register_driver(struct pi_protocol *pr);
+void pata_parport_unregister_driver(struct pi_protocol *pr);
+/* defines for old paride protocol modules */
+#define paride_register pata_parport_register_driver
+#define paride_unregister pata_parport_unregister_driver
+
+#endif /* LINUX_PATA_PARPORT_H */
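
A protocol module built on this header fills in a struct pi_protocol and registers it with the core; the old paride_register()/paride_unregister() names remain as aliases. A trimmed, hypothetical skeleton (the foo_* hooks are assumed to exist, and fields such as .sht are omitted):

static struct pi_protocol foo = {
	.owner		= THIS_MODULE,
	.name		= "foo",
	.max_mode	= 1,
	.epp_first	= 2,
	.default_delay	= 1,
	.max_units	= 1,
	.write_regr	= foo_write_regr,
	.read_regr	= foo_read_regr,
	.write_block	= foo_write_block,
	.read_block	= foo_read_block,
	.connect	= foo_connect,
	.disconnect	= foo_disconnect,
};

static int __init foo_init(void)
{
	return pata_parport_register_driver(&foo);
}

static void __exit foo_exit(void)
{
	pata_parport_unregister_driver(&foo);
}

module_init(foo_init);
module_exit(foo_exit);
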
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index a48778e1a4ee..301bb0e53707 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -122,6 +122,7 @@ struct pci_epc_mem {
* struct pci_epc - represents the PCI EPC device
* @dev: PCI EPC device
* @pci_epf: list of endpoint functions present in this EPC device
+ * @list_lock: Mutex for protecting pci_epf list
* @ops: function pointers for performing endpoint operations
* @windows: array of address space of the endpoint controller
* @mem: first window of the endpoint controller, which corresponds to
@@ -134,11 +135,11 @@ struct pci_epc_mem {
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
* @function_num_map: bitmap to manage physical function number
- * @notifier: used to notify EPF of any EPC events (like linkup)
*/
struct pci_epc {
struct device dev;
struct list_head pci_epf;
+ struct mutex list_lock;
const struct pci_epc_ops *ops;
struct pci_epc_mem **windows;
struct pci_epc_mem *mem;
@@ -149,7 +150,6 @@ struct pci_epc {
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
unsigned long function_num_map;
- struct atomic_notifier_head notifier;
};
/**
@@ -192,12 +192,6 @@ static inline void *epc_get_drvdata(struct pci_epc *epc)
return dev_get_drvdata(&epc->dev);
}
-static inline int
-pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&epc->notifier, nb);
-}
-
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 009a07147c61..a215dc8ce693 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -17,11 +17,6 @@
struct pci_epf;
enum pci_epc_interface_type;
-enum pci_notify_event {
- CORE_INIT,
- LINK_UP,
-};
-
enum pci_barno {
NO_BAR = -1,
BAR_0,
@@ -73,6 +68,16 @@ struct pci_epf_ops {
};
/**
+ * struct pci_epc_event_ops - Callbacks for capturing the EPC events
+ * @core_init: Callback for the EPC initialization complete event
+ * @link_up: Callback for the EPC link up event
+ */
+struct pci_epc_event_ops {
+ int (*core_init)(struct pci_epf *epf);
+ int (*link_up)(struct pci_epf *epf);
+};
+
+/**
* struct pci_epf_driver - represents the PCI EPF driver
* @probe: ops to perform when a new EPF device has been bound to the EPF driver
* @remove: ops to perform when the binding between the EPF device and EPF
@@ -127,7 +132,6 @@ struct pci_epf_bar {
* @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
- * @nb: notifier block to notify EPF of any EPC events (like linkup)
* @lock: mutex to protect pci_epf_ops
* @sec_epc: the secondary EPC device to which this EPF device is bound
* @sec_epc_list: to add pci_epf as list of PCI endpoint functions to secondary
@@ -139,6 +143,7 @@ struct pci_epf_bar {
* @is_vf: true - virtual function, false - physical function
* @vfunction_num_map: bitmap to manage virtual function number
* @pci_vepf: list of virtual endpoint functions associated with this function
+ * @event_ops: Callbacks for capturing the EPC events
*/
struct pci_epf {
struct device dev;
@@ -154,7 +159,6 @@ struct pci_epf {
struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
struct list_head list;
- struct notifier_block nb;
/* mutex to protect against concurrent access of pci_epf_ops */
struct mutex lock;
@@ -168,6 +172,7 @@ struct pci_epf {
unsigned int is_vf;
unsigned long vfunction_num_map;
struct list_head pci_vepf;
+ const struct pci_epc_event_ops *event_ops;
};
/**
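
With the notifier block gone, an EPF driver now publishes a struct pci_epc_event_ops and assigns it during probe. A minimal sketch; the callback bodies are placeholders:

static int foo_epf_core_init(struct pci_epf *epf)
{
	/* Program BARs, MSI, etc. once the controller is initialised. */
	return 0;
}

static int foo_epf_link_up(struct pci_epf *epf)
{
	/* Start the function's work now that the link is up. */
	return 0;
}

static const struct pci_epc_event_ops foo_event_ops = {
	.core_init	= foo_epf_core_init,
	.link_up	= foo_epf_link_up,
};

static int foo_epf_probe(struct pci_epf *epf)
{
	epf->event_ops = &foo_event_ops;
	return 0;
}
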
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 254c8a4126a8..fafd8020c6d7 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -572,12 +572,14 @@ struct pci_host_bridge {
void *release_data;
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
+ unsigned int no_inc_mrrs:1; /* No Increase MRRS */
unsigned int native_aer:1; /* OS may use PCIe AER */
unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
unsigned int native_pme:1; /* OS may use PCIe PME */
unsigned int native_ltr:1; /* OS may use PCIe LTR */
unsigned int native_dpc:1; /* OS may use PCIe DPC */
+ unsigned int native_cxl_error:1; /* OS may use CXL RAS/Events */
unsigned int preserve_config:1; /* Preserve FW resource setup */
unsigned int size_windows:1; /* Enable root bus sizing */
unsigned int msi_domain:1; /* Bridge wants MSI domain */
@@ -1697,10 +1699,15 @@ extern bool pcie_ports_native;
#define PCIE_LINK_STATE_L1_2 BIT(4)
#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5)
#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6)
+#define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |\
+ PCIE_LINK_STATE_CLKPM | PCIE_LINK_STATE_L1_1 |\
+ PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
#ifdef CONFIG_PCIEASPM
int pci_disable_link_state(struct pci_dev *pdev, int state);
int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
+int pci_enable_link_state(struct pci_dev *pdev, int state);
void pcie_no_aspm(void);
bool pcie_aspm_support_enabled(void);
bool pcie_aspm_enabled(struct pci_dev *pdev);
@@ -1709,6 +1716,8 @@ static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{ return 0; }
+static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
+{ return 0; }
static inline void pcie_no_aspm(void) { }
static inline bool pcie_aspm_support_enabled(void) { return false; }
static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
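
pci_enable_link_state() is the counterpart to pci_disable_link_state(), and PCIE_LINK_STATE_ALL bundles every ASPM substate plus clock PM. A short hedged sketch of a controller driver opting a downstream device back in:

static void foo_allow_aspm(struct pci_dev *pdev)
{
	int ret = pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);

	if (ret)
		pci_info(pdev, "could not enable ASPM states: %d\n", ret);
}
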
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index b362d90eb9b0..45c3d62e616d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -3012,6 +3012,8 @@
#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+
#define PCI_VENDOR_ID_SCALEMP 0x8686
#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010
@@ -3092,6 +3094,8 @@
#define PCI_VENDOR_ID_3COM_2 0xa727
+#define PCI_VENDOR_ID_SOLIDRUN 0xd063
+
#define PCI_VENDOR_ID_DIGIUM 0xd161
#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index a3aae8d57a42..521a733e21a9 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -152,9 +152,11 @@ __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
- preempt_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
fbc->count += amount;
- preempt_enable();
+ local_irq_restore(flags);
}
/* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
index e17e86ad6f3a..43fc892aa7d9 100644
--- a/include/linux/perf/riscv_pmu.h
+++ b/include/linux/perf/riscv_pmu.h
@@ -26,6 +26,8 @@
#define RISCV_PMU_STOP_FLAG_RESET 1
+#define RISCV_PMU_CONFIG1_GUEST_EVENTS 0x1
+
struct cpu_hw_events {
/* currently enabled events */
int n_events;
@@ -73,6 +75,9 @@ void riscv_pmu_legacy_skip_init(void);
static inline void riscv_pmu_legacy_skip_init(void) {};
#endif
struct riscv_pmu *riscv_pmu_alloc(void);
+#ifdef CONFIG_RISCV_PMU_SBI
+int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr);
+#endif
#endif /* CONFIG_RISCV_PMU */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 1159b25b0542..c63cd44777ec 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1064,35 +1064,6 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
#define arch_start_context_switch(prev) do {} while (0)
#endif
-/*
- * When replacing an anonymous page by a real (!non) swap entry, we clear
- * PG_anon_exclusive from the page and instead remember whether the flag was
- * set in the swp pte. During fork(), we have to mark the entry as !exclusive
- * (possibly shared). On swapin, we use that information to restore
- * PG_anon_exclusive, which is very helpful in cases where we might have
- * additional (e.g., FOLL_GET) references on a page and wouldn't be able to
- * detect exclusivity.
- *
- * These functions don't apply to non-swap entries (e.g., migration, hwpoison,
- * ...).
- */
-#ifndef __HAVE_ARCH_PTE_SWP_EXCLUSIVE
-static inline pte_t pte_swp_mkexclusive(pte_t pte)
-{
- return pte;
-}
-
-static inline int pte_swp_exclusive(pte_t pte)
-{
- return false;
-}
-
-static inline pte_t pte_swp_clear_exclusive(pte_t pte)
-{
- return pte;
-}
-#endif
-
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
@@ -1214,7 +1185,8 @@ static inline int track_pfn_copy(struct vm_area_struct *vma)
* can be for the entire vma (in which case pfn, size are zero).
*/
static inline void untrack_pfn(struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size)
+ unsigned long pfn, unsigned long size,
+ bool mm_wr_locked)
{
}
@@ -1232,7 +1204,7 @@ extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
- unsigned long size);
+ unsigned long size, bool mm_wr_locked);
extern void untrack_pfn_moved(struct vm_area_struct *vma);
#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6378c997ded5..36bf0bbc8efa 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -45,27 +45,32 @@
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
+#define PHY_BASIC_T1S_P2MP_FEATURES ((unsigned long *)&phy_basic_t1s_p2mp_features)
#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
+#define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features)
extern const int phy_basic_ports_array[3];
extern const int phy_fibre_port_array[1];
extern const int phy_all_ports_features_array[7];
extern const int phy_10_100_features_array[4];
extern const int phy_basic_t1_features_array[3];
+extern const int phy_basic_t1s_p2mp_features_array[2];
extern const int phy_gbit_features_array[2];
extern const int phy_10gbit_features_array[1];
@@ -364,6 +369,11 @@ struct mii_bus {
int (*read)(struct mii_bus *bus, int addr, int regnum);
/** @write: Perform a write transfer on the bus */
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ /** @read_c45: Perform a C45 read transfer on the bus */
+ int (*read_c45)(struct mii_bus *bus, int addr, int devnum, int regnum);
+ /** @write_c45: Perform a C45 write transfer on the bus */
+ int (*write_c45)(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val);
/** @reset: Perform a reset of the bus */
int (*reset)(struct mii_bus *bus);
@@ -411,14 +421,6 @@ struct mii_bus {
/** @reset_gpiod: Reset GPIO descriptor pointer */
struct gpio_desc *reset_gpiod;
- /** @probe_capabilities: bus capabilities, used for probing */
- enum {
- MDIOBUS_NO_CAP = 0,
- MDIOBUS_C22,
- MDIOBUS_C45,
- MDIOBUS_C22_C45,
- } probe_capabilities;
-
/** @shared_lock: protect access to the shared element */
struct mutex shared_lock;
@@ -456,7 +458,7 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
}
struct mii_bus *mdio_find_bus(const char *mdio_name);
-struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
+struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr);
#define PHY_INTERRUPT_DISABLED false
#define PHY_INTERRUPT_ENABLED true
@@ -572,6 +574,9 @@ struct macsec_ops;
* @supported: Combined MAC/PHY supported linkmodes
* @advertising: Currently advertised linkmodes
* @adv_old: Saved advertised while power saving for WoL
+ * @supported_eee: supported PHY EEE linkmodes
+ * @advertising_eee: Currently advertised EEE linkmodes
+ * @eee_enabled: Flag indicating whether the EEE feature is enabled
* @lp_advertising: Current link partner advertised linkmodes
* @host_interfaces: PHY interface modes supported by host
* @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
@@ -676,6 +681,10 @@ struct phy_device {
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
/* used with phy_speed_down */
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ /* used for eee validation */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee);
+ bool eee_enabled;
/* Host supported PHY interface types. Should be ignored if empty. */
DECLARE_PHY_INTERFACE_MASK(host_interfaces);
@@ -739,6 +748,9 @@ struct phy_device {
#endif
};
+/* Generic phy_device::dev_flags */
+#define PHY_F_NO_IRQ 0x80000000
+
static inline struct phy_device *to_phy_device(const struct device *dev)
{
return container_of(to_mdio_device(dev), struct phy_device, mdio);
@@ -766,6 +778,63 @@ struct phy_tdr_config {
#define PHY_PAIR_ALL -1
/**
+ * struct phy_plca_cfg - Configuration of the PLCA (Physical Layer Collision
+ * Avoidance) Reconciliation Sublayer.
+ *
+ * @version: read-only PLCA register map version. -1 = not available. Ignored
+ * when setting the configuration. Format is the same as reported by the PLCA
+ * IDVER register (31.CA00).
+ * @enabled: PLCA configured mode (enabled/disabled). -1 = not available / don't
+ * set. 0 = disabled, anything else = enabled.
+ * @node_id: the PLCA local node identifier. -1 = not available / don't set.
+ * Allowed values [0 .. 254]. 255 = node disabled.
+ * @node_cnt: the PLCA node count (maximum number of nodes having a TO). Only
+ * meaningful for the coordinator (node_id = 0). -1 = not available / don't
+ * set. Allowed values [1 .. 255].
+ * @to_tmr: The value of the PLCA to_timer in bit-times, which determines the
+ * PLCA transmit opportunity window opening. See IEEE802.3 Clause 148 for
+ * more details. The to_timer shall be set equal over all nodes.
+ * -1 = not available / don't set. Allowed values [0 .. 255].
+ * @burst_cnt: controls how many additional frames a node is allowed to send in
+ * a single transmit opportunity (TO). The default value of 0 means that the
+ * node is allowed exactly one frame per TO. A value of 1 allows two frames
+ * per TO, and so on. -1 = not available / don't set.
+ * Allowed values [0 .. 255].
+ * @burst_tmr: controls how many bit times to wait for the MAC to send a new
+ * frame before interrupting the burst. This value should be set to a value
+ * greater than the MAC inter-packet gap (which is typically 96 bits).
+ * -1 = not available / don't set. Allowed values [0 .. 255].
+ *
+ * A structure containing configuration parameters for setting/getting the PLCA
+ * RS configuration. The driver does not need to implement all the parameters,
+ * but should report what is actually used.
+ */
+struct phy_plca_cfg {
+ int version;
+ int enabled;
+ int node_id;
+ int node_cnt;
+ int to_tmr;
+ int burst_cnt;
+ int burst_tmr;
+};
+
+/**
+ * struct phy_plca_status - Status of the PLCA (Physical Layer Collision
+ * Avoidance) Reconciliation Sublayer.
+ *
+ * @pst: The PLCA status as reported by the PST bit in the PLCA STATUS
+ * register(31.CA03), indicating BEACON activity.
+ *
+ * A structure containing status information of the PLCA RS configuration.
+ * The driver does not need to implement all the parameters, but should report
+ * what is actually used.
+ */
+struct phy_plca_status {
+ bool pst;
+};
+
+/**
* struct phy_driver - Driver structure for a particular PHY type
*
* @mdiodrv: Data common to all MDIO devices
@@ -976,6 +1045,17 @@ struct phy_driver {
int (*get_sqi)(struct phy_device *dev);
/** @get_sqi_max: Get the maximum signal quality indication */
int (*get_sqi_max)(struct phy_device *dev);
+
+ /* PLCA RS interface */
+ /** @get_plca_cfg: Return the current PLCA configuration */
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ /** @set_plca_cfg: Set the PLCA configuration */
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg);
+ /** @get_plca_status: Return the current PLCA status info */
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
@@ -1543,6 +1623,7 @@ int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
int phy_speed_down(struct phy_device *phydev, bool sync);
int phy_speed_up(struct phy_device *phydev);
+bool phy_check_valid(int speed, int duplex, unsigned long *features);
int phy_restart_aneg(struct phy_device *phydev);
int phy_reset_after_clk_enable(struct phy_device *phydev);
@@ -1666,6 +1747,7 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
int genphy_c45_read_mdix(struct phy_device *phydev);
int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+int genphy_c45_read_eee_abilities(struct phy_device *phydev);
int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev);
int genphy_c45_read_status(struct phy_device *phydev);
int genphy_c45_baset1_read_status(struct phy_device *phydev);
@@ -1674,6 +1756,21 @@ int genphy_c45_loopback(struct phy_device *phydev, bool enable);
int genphy_c45_pma_resume(struct phy_device *phydev);
int genphy_c45_pma_suspend(struct phy_device *phydev);
int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable);
+int genphy_c45_plca_get_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg);
+int genphy_c45_plca_set_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg);
+int genphy_c45_plca_get_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st);
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
+ unsigned long *lp, bool *is_enabled);
+int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
+ struct ethtool_eee *data);
+int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
+ struct ethtool_eee *data);
+int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv);
+int genphy_c45_an_config_eee_aneg(struct phy_device *phydev);
+int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
/* Generic C45 PHY driver */
extern struct phy_driver genphy_c45_driver;
@@ -1772,6 +1869,13 @@ int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
int phy_ethtool_get_sset_count(struct phy_device *phydev);
int phy_ethtool_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data);
+int phy_ethtool_get_plca_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg);
+int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+int phy_ethtool_get_plca_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st);
static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
{
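
For the new PLCA interface, a 10BASE-T1S PHY driver can usually forward the ethtool hooks straight to the generic C45 helpers declared in this hunk. A hedged, incomplete sketch of the relevant part of a phy_driver (the ID and name are illustrative):

static struct phy_driver foo_t1s_driver = {
	.phy_id		= 0x12345678,		/* illustrative ID */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Foo 10BASE-T1S",
	.features	= PHY_BASIC_T1S_P2MP_FEATURES,
	.get_plca_cfg	= genphy_c45_plca_get_cfg,
	.set_plca_cfg	= genphy_c45_plca_set_cfg,
	.get_plca_status = genphy_c45_plca_get_status,
};
/* Registered via module_phy_driver() in a complete driver. */
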
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index b1413757fcc3..3a570bc59fc7 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -250,11 +250,12 @@ static inline void phy_set_bus_width(struct phy *phy, int bus_width)
phy->attrs.bus_width = bus_width;
}
struct phy *phy_get(struct device *dev, const char *string);
-struct phy *phy_optional_get(struct device *dev, const char *string);
struct phy *devm_phy_get(struct device *dev, const char *string);
struct phy *devm_phy_optional_get(struct device *dev, const char *string);
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
const char *con_id);
+struct phy *devm_of_phy_optional_get(struct device *dev, struct device_node *np,
+ const char *con_id);
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
int index);
void of_phy_put(struct phy *phy);
@@ -426,12 +427,6 @@ static inline struct phy *phy_get(struct device *dev, const char *string)
return ERR_PTR(-ENOSYS);
}
-static inline struct phy *phy_optional_get(struct device *dev,
- const char *string)
-{
- return ERR_PTR(-ENOSYS);
-}
-
static inline struct phy *devm_phy_get(struct device *dev, const char *string)
{
return ERR_PTR(-ENOSYS);
@@ -450,6 +445,13 @@ static inline struct phy *devm_of_phy_get(struct device *dev,
return ERR_PTR(-ENOSYS);
}
+static inline struct phy *devm_of_phy_optional_get(struct device *dev,
+ struct device_node *np,
+ const char *con_id)
+{
+ return NULL;
+}
+
static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
struct device_node *np,
int index)
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 07481bb87d4e..c758809d5bcf 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -16,6 +16,21 @@
struct fs_pin;
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+/*
+ * sysctl for vm.memfd_noexec
+ * 0: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL
+ * acts like MFD_EXEC was set.
+ * 1: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL
+ * acts like MFD_NOEXEC_SEAL was set.
+ * 2: memfd_create() without MFD_NOEXEC_SEAL will be
+ * rejected.
+ */
+#define MEMFD_NOEXEC_SCOPE_EXEC 0
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 1
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2
+#endif
+
struct pid_namespace {
struct idr idr;
struct rcu_head rcu;
@@ -31,6 +46,10 @@ struct pid_namespace {
struct ucounts *ucounts;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+ /* sysctl for vm.memfd_noexec */
+ int memfd_noexec_scope;
+#endif
} __randomize_layout;
extern struct pid_namespace init_pid_ns;
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h
index 9e8b559e1253..bb6653af4f92 100644
--- a/include/linux/pinctrl/devinfo.h
+++ b/include/linux/pinctrl/devinfo.h
@@ -18,6 +18,8 @@ struct device;
#ifdef CONFIG_PINCTRL
+#include <linux/device.h>
+
/* The device core acts as a consumer toward pinctrl */
#include <linux/pinctrl/consumer.h>
@@ -44,6 +46,14 @@ struct dev_pin_info {
extern int pinctrl_bind_pins(struct device *dev);
extern int pinctrl_init_done(struct device *dev);
+static inline struct pinctrl *dev_pinctrl(struct device *dev)
+{
+ if (!dev->pins)
+ return NULL;
+
+ return dev->pins->p;
+}
+
#else
/* Stubs if we're not using pinctrl */
@@ -58,5 +68,10 @@ static inline int pinctrl_init_done(struct device *dev)
return 0;
}
+static inline struct pinctrl *dev_pinctrl(struct device *dev)
+{
+ return NULL;
+}
+
#endif /* CONFIG_PINCTRL */
#endif /* PINCTRL_DEVINFO_H */
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index a0d39b303431..4d252ea00ed1 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -206,6 +206,26 @@ extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
const char *pin_group, const unsigned **pins,
unsigned *num_pins);
+/**
+ * struct pinfunction - Description about a function
+ * @name: Name of the function
+ * @groups: An array of groups for this function
+ * @ngroups: Number of groups in @groups
+ */
+struct pinfunction {
+ const char *name;
+ const char * const *groups;
+ size_t ngroups;
+};
+
+/* Convenience macro to define a single named pinfunction */
+#define PINCTRL_PINFUNCTION(_name, _groups, _ngroups) \
+(struct pinfunction) { \
+ .name = (_name), \
+ .groups = (_groups), \
+ .ngroups = (_ngroups), \
+ }
+
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL)
extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
#else
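
PINCTRL_PINFUNCTION() builds a struct pinfunction compound literal, which keeps per-driver function tables terse. A usage sketch with illustrative group names:

static const char * const foo_uart_groups[] = { "uart0_grp", "uart1_grp" };
static const char * const foo_i2c_groups[]  = { "i2c0_grp" };

static const struct pinfunction foo_functions[] = {
	PINCTRL_PINFUNCTION("uart", foo_uart_groups, ARRAY_SIZE(foo_uart_groups)),
	PINCTRL_PINFUNCTION("i2c",  foo_i2c_groups,  ARRAY_SIZE(foo_i2c_groups)),
};
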
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 6cb65df3e3ba..d2c3f16cf6b1 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -157,6 +157,26 @@ static inline bool pipe_full(unsigned int head, unsigned int tail,
}
/**
+ * pipe_buf - Return the pipe buffer for the specified slot in the pipe ring
+ * @pipe: The pipe to access
+ * @slot: The slot of interest
+ */
+static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
+ unsigned int slot)
+{
+ return &pipe->bufs[slot & (pipe->ring_size - 1)];
+}
+
+/**
+ * pipe_head_buf - Return the pipe buffer at the head of the pipe ring
+ * @pipe: The pipe to access
+ */
+static inline struct pipe_buffer *pipe_head_buf(const struct pipe_inode_info *pipe)
+{
+ return pipe_buf(pipe, pipe->head);
+}
+
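
As a non-authoritative sketch (assuming the caller already holds the pipe lock), the helpers replace open-coded &pipe->bufs[i & (pipe->ring_size - 1)] lookups:

    /* Count the bytes currently queued between tail and head. */
    static size_t example_pipe_queued_bytes(const struct pipe_inode_info *pipe)
    {
            unsigned int tail = pipe->tail;
            unsigned int head = pipe->head;
            size_t bytes = 0;

            while (tail != head) {
                    const struct pipe_buffer *buf = pipe_buf(pipe, tail);

                    bytes += buf->len;
                    tail++;
            }

            return bytes;
    }
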
+/**
* pipe_buf_get - get a reference to a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
diff --git a/include/linux/platform_data/amd_xdma.h b/include/linux/platform_data/amd_xdma.h
new file mode 100644
index 000000000000..b5e23e14bac8
--- /dev/null
+++ b/include/linux/platform_data/amd_xdma.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_XDMA_H
+#define _PLATDATA_AMD_XDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct xdma_chan_info - DMA channel information
+ * This information is used to match a channel when requesting a DMA channel
+ * @dir: Channel transfer direction
+ */
+struct xdma_chan_info {
+ enum dma_transfer_direction dir;
+};
+
+#define XDMA_FILTER_PARAM(chan_info) ((void *)(chan_info))
+
+struct dma_slave_map;
+
+/**
+ * struct xdma_platdata - platform specific data for XDMA engine
+ * @max_dma_channels: Maximum dma channels in each direction
+ * @device_map_cnt: Number of entries in @device_map
+ * @device_map: dma slave map entries used when matching channel requests
+ */
+struct xdma_platdata {
+ u32 max_dma_channels;
+ u32 device_map_cnt;
+ struct dma_slave_map *device_map;
+};
+
+#endif /* _PLATDATA_AMD_XDMA_H */
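
A hedged sketch of how a platform might hand this data to the XDMA driver; the client device name and channel name below are invented, not defined by this header:

    #include <linux/dmaengine.h>
    #include <linux/kernel.h>
    #include <linux/platform_data/amd_xdma.h>

    static struct xdma_chan_info ex_h2c_chan_info = {
            .dir = DMA_MEM_TO_DEV,          /* host-to-card direction */
    };

    static struct dma_slave_map ex_xdma_map[] = {
            { "ex-client", "h2c0", XDMA_FILTER_PARAM(&ex_h2c_chan_info) },
    };

    static struct xdma_platdata ex_xdma_pdata = {
            .max_dma_channels = 4,
            .device_map = ex_xdma_map,
            .device_map_cnt = ARRAY_SIZE(ex_xdma_map),
    };
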
diff --git a/include/linux/platform_data/asoc-ux500-msp.h b/include/linux/platform_data/asoc-ux500-msp.h
deleted file mode 100644
index b8d0f730dda8..000000000000
--- a/include/linux/platform_data/asoc-ux500-msp.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- */
-
-#ifndef __MSP_H
-#define __MSP_H
-
-#include <linux/platform_data/dma-ste-dma40.h>
-
-/* Platform data structure for a MSP I2S-device */
-struct msp_i2s_platform_data {
- int id;
- struct stedma40_chan_cfg *msp_i2s_dma_rx;
- struct stedma40_chan_cfg *msp_i2s_dma_tx;
-};
-
-#endif
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index b3b3df163efc..7e9c76aedd2d 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -3481,6 +3481,9 @@ struct ec_response_get_next_event_v1 {
#define EC_MKBP_VOL_UP 1
#define EC_MKBP_VOL_DOWN 2
#define EC_MKBP_RECOVERY 3
+#define EC_MKBP_BRI_UP 4
+#define EC_MKBP_BRI_DOWN 5
+#define EC_MKBP_SCREEN_LOCK 6
/* Switches */
#define EC_MKBP_LID_OPEN 0
diff --git a/include/linux/platform_data/i2c-gpio.h b/include/linux/platform_data/i2c-gpio.h
index a907774fd177..545639bcca72 100644
--- a/include/linux/platform_data/i2c-gpio.h
+++ b/include/linux/platform_data/i2c-gpio.h
@@ -16,16 +16,25 @@
* isn't actively driven high when setting the output value high.
* gpio_get_value() must return the actual pin state even if the
* pin is configured as an output.
+ * @sda_is_output_only: SDA output drivers can't be turned off.
+ * This is for clients that can only read SDA/SCL.
+ * @sda_has_no_pullup: SDA is used in a non-compliant way and has no pull-up.
+ * Therefore disable open-drain.
* @scl_is_open_drain: SCL is set up as open drain. Same requirements
* as for sda_is_open_drain apply.
* @scl_is_output_only: SCL output drivers cannot be turned off.
+ * @scl_has_no_pullup: SCL is used in a non-compliant way and has no pull-up.
+ * Therefore disable open-drain.
*/
struct i2c_gpio_platform_data {
int udelay;
int timeout;
unsigned int sda_is_open_drain:1;
+ unsigned int sda_is_output_only:1;
+ unsigned int sda_has_no_pullup:1;
unsigned int scl_is_open_drain:1;
unsigned int scl_is_output_only:1;
+ unsigned int scl_has_no_pullup:1;
};
#endif /* _LINUX_I2C_GPIO_H */
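
For illustration only, a board file driving a write-only bus segment with no external pull-ups could combine the new flags like this:

    static struct i2c_gpio_platform_data ex_i2c_gpio_pdata = {
            .udelay = 5,                    /* roughly 100 kHz */
            /* the lines are driven push-pull and never read back */
            .sda_is_output_only = 1,
            .sda_has_no_pullup = 1,
            .scl_is_output_only = 1,
            .scl_has_no_pullup = 1,
    };
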
diff --git a/include/linux/platform_data/pcf857x.h b/include/linux/platform_data/pcf857x.h
deleted file mode 100644
index 01d0a3ea3aef..000000000000
--- a/include/linux/platform_data/pcf857x.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_PCF857X_H
-#define __LINUX_PCF857X_H
-
-/**
- * struct pcf857x_platform_data - data to set up pcf857x driver
- * @gpio_base: number of the chip's first GPIO
- * @n_latch: optional bit-inverse of initial register value; if
- * you leave this initialized to zero the driver will act
- * like the chip was just reset
- * @setup: optional callback issued once the GPIOs are valid
- * @teardown: optional callback issued before the GPIOs are invalidated
- * @context: optional parameter passed to setup() and teardown()
- *
- * In addition to the I2C_BOARD_INFO() state appropriate to each chip,
- * the i2c_board_info used with the pcf875x driver must provide its
- * platform_data (pointer to one of these structures) with at least
- * the gpio_base value initialized.
- *
- * The @setup callback may be used with the kind of board-specific glue
- * which hands the (now-valid) GPIOs to other drivers, or which puts
- * devices in their initial states using these GPIOs.
- *
- * These GPIO chips are only "quasi-bidirectional"; read the chip specs
- * to understand the behavior. They don't have separate registers to
- * record which pins are used for input or output, record which output
- * values are driven, or provide access to input values. That must be
- * inferred by reading the chip's value and knowing the last value written
- * to it. If you leave n_latch initialized to zero, that last written
- * value is presumed to be all ones (as if the chip were just reset).
- */
-struct pcf857x_platform_data {
- unsigned gpio_base;
- unsigned n_latch;
-
- int (*setup)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- void (*teardown)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- void *context;
-};
-
-#endif /* __LINUX_PCF857X_H */
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
index 27ea99af6e1d..4f94d52ac99f 100644
--- a/include/linux/platform_data/simplefb.h
+++ b/include/linux/platform_data/simplefb.h
@@ -22,6 +22,7 @@
{ "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \
{ "x8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_XRGB8888 }, \
{ "a8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {24, 8}, DRM_FORMAT_ARGB8888 }, \
+ { "x8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {0, 0}, DRM_FORMAT_XBGR8888 }, \
{ "a8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {24, 8}, DRM_FORMAT_ABGR8888 }, \
{ "x2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {0, 0}, DRM_FORMAT_XRGB2101010 }, \
{ "a2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {30, 2}, DRM_FORMAT_ARGB2101010 }, \
diff --git a/include/linux/platform_data/tsl2563.h b/include/linux/platform_data/tsl2563.h
deleted file mode 100644
index 9cf9309c3f24..000000000000
--- a/include/linux/platform_data/tsl2563.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_TSL2563_H
-#define __LINUX_TSL2563_H
-
-struct tsl2563_platform_data {
- int cover_comp_gain;
-};
-
-#endif /* __LINUX_TSL2563_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index b0d5a253156e..b845fd83f429 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -207,7 +207,18 @@ extern void platform_device_put(struct platform_device *pdev);
struct platform_driver {
int (*probe)(struct platform_device *);
+
+ /*
+ * Traditionally the remove callback returned an int, which is
+ * ignored by the driver core. This led to wrong expectations by
+ * driver authors who thought returning an error code was a valid
+ * error handling strategy. To convert to a callback returning void,
+ * new drivers should implement .remove_new() until the conversion
+ * is done and .remove() itself can be made to return void.
+ */
int (*remove)(struct platform_device *);
+ void (*remove_new)(struct platform_device *);
+
void (*shutdown)(struct platform_device *);
int (*suspend)(struct platform_device *, pm_message_t state);
int (*resume)(struct platform_device *);
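
A minimal sketch of the new-style callback; the driver and device names are made up:

    static int ex_probe(struct platform_device *pdev)
    {
            /* acquire resources with devm_*() so there is little to undo */
            return 0;
    }

    static void ex_remove(struct platform_device *pdev)
    {
            /* nothing here can fail and no return value is expected */
    }

    static struct platform_driver ex_driver = {
            .probe = ex_probe,
            .remove_new = ex_remove,
            .driver = {
                    .name = "ex-device",
            },
    };
    module_platform_driver(ex_driver);
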
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 1cd41bdf73cf..f776fb93eaa0 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -136,6 +136,7 @@ struct generic_pm_domain {
unsigned int prepared_count; /* Suspend counter of prepared devices */
unsigned int performance_state; /* Aggregated max performance state */
cpumask_var_t cpus; /* A cpumask of the attached CPUs */
+ bool synced_poweroff; /* A consumer needs a synced poweroff */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
struct raw_notifier_head power_notifiers; /* Power on/off notifiers */
@@ -235,6 +236,7 @@ int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
int dev_pm_genpd_remove_notifier(struct device *dev);
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
+void dev_pm_genpd_synced_poweroff(struct device *dev);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -300,6 +302,9 @@ static inline ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
return KTIME_MAX;
}
+static inline void dev_pm_genpd_synced_poweroff(struct device *dev)
+{ }
+
#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 0e8a1f2ceb2f..851a855d3868 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -81,6 +81,9 @@
/********** net/core/page_pool.c **********/
#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA)
+/********** net/core/skbuff.c **********/
+#define SKB_LIST_POISON_NEXT ((void *)(0x800 + POISON_POINTER_DELTA))
+
/********** kernel/bpf/ **********/
#define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 8c81806c2e99..8ef499ab3c1e 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -44,8 +44,6 @@ static inline const char *printk_skip_headers(const char *buffer)
return buffer;
}
-#define CONSOLE_EXT_LOG_MAX 8192
-
/* printk's without a loglevel use this.. */
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
diff --git a/include/linux/property.h b/include/linux/property.h
index 37179e3abad5..0a29db15ff34 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -436,9 +436,9 @@ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
unsigned int fwnode_graph_get_endpoint_count(struct fwnode_handle *fwnode,
unsigned long flags);
-#define fwnode_graph_for_each_endpoint(fwnode, child) \
- for (child = NULL; \
- (child = fwnode_graph_get_next_endpoint(fwnode, child)); )
+#define fwnode_graph_for_each_endpoint(fwnode, child) \
+ for (child = fwnode_graph_get_next_endpoint(fwnode, NULL); child; \
+ child = fwnode_graph_get_next_endpoint(fwnode, child))
int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
@@ -486,9 +486,6 @@ const struct software_node *
software_node_find_by_name(const struct software_node *parent,
const char *name);
-int software_node_register_nodes(const struct software_node *nodes);
-void software_node_unregister_nodes(const struct software_node *nodes);
-
int software_node_register_node_group(const struct software_node **node_group);
void software_node_unregister_node_group(const struct software_node **node_group);
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index 2b6ea36ad162..1b5a953c6bbc 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -10,8 +10,12 @@
#ifndef _PTP_CLASSIFY_H_
#define _PTP_CLASSIFY_H_
+#include <asm/unaligned.h>
#include <linux/ip.h>
+#include <linux/ktime.h>
#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <net/checksum.h>
#define PTP_CLASS_NONE 0x00 /* not a PTP event message */
#define PTP_CLASS_V1 0x01 /* protocol version 1 */
@@ -130,6 +134,69 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
}
/**
+ * ptp_check_diff8 - Computes new checksum (when altering a 64-bit field)
+ * @old: old field value
+ * @new: new field value
+ * @oldsum: previous checksum
+ *
+ * This function can be used to calculate a new checksum when only a single
+ * field is changed. Similar to ip_vs_check_diff*() in ip_vs.h.
+ *
+ * Return: Updated checksum
+ */
+static inline __wsum ptp_check_diff8(__be64 old, __be64 new, __wsum oldsum)
+{
+ __be64 diff[2] = { ~old, new };
+
+ return csum_partial(diff, sizeof(diff), oldsum);
+}
+
+/**
+ * ptp_header_update_correction - Update PTP header's correction field
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ * @hdr: ptp header
+ * @correction: new correction value
+ *
+ * This updates the correction field of a PTP header and updates the UDP
+ * checksum (if UDP is used as transport). It is needed for hardware capable of
+ * one-step P2P that does not already modify the correction field of Pdelay_Req
+ * event messages on ingress.
+ */
+static inline
+void ptp_header_update_correction(struct sk_buff *skb, unsigned int type,
+ struct ptp_header *hdr, s64 correction)
+{
+ __be64 correction_old;
+ struct udphdr *uhdr;
+
+ /* previous correction value is required for checksum update. */
+ memcpy(&correction_old, &hdr->correction, sizeof(correction_old));
+
+ /* write new correction value */
+ put_unaligned_be64((u64)correction, &hdr->correction);
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ case PTP_CLASS_IPV6:
+ /* locate udp header */
+ uhdr = (struct udphdr *)((char *)hdr - sizeof(struct udphdr));
+ break;
+ default:
+ return;
+ }
+
+ /* update checksum */
+ uhdr->check = csum_fold(ptp_check_diff8(correction_old,
+ hdr->correction,
+ ~csum_unfold(uhdr->check)));
+ if (!uhdr->check)
+ uhdr->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/**
* ptp_msg_is_sync - Evaluates whether the given skb is a PTP Sync message
* @skb: packet buffer
* @type: type of the packet (see ptp_classify_raw())
@@ -166,5 +233,11 @@ static inline bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type)
{
return false;
}
+
+static inline
+void ptp_header_update_correction(struct sk_buff *skb, unsigned int type,
+ struct ptp_header *hdr, s64 correction)
+{
+}
#endif
#endif /* _PTP_CLASSIFY_H_ */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 161e91167b9c..7b7b93b6fb81 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -440,13 +440,6 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
return -EINVAL;
}
-static inline int pwm_capture(struct pwm_device *pwm,
- struct pwm_capture *result,
- unsigned long timeout)
-{
- return -EINVAL;
-}
-
static inline int pwm_enable(struct pwm_device *pwm)
{
might_sleep();
@@ -458,6 +451,13 @@ static inline void pwm_disable(struct pwm_device *pwm)
might_sleep();
}
+static inline int pwm_capture(struct pwm_device *pwm,
+ struct pwm_capture *result,
+ unsigned long timeout)
+{
+ return -EINVAL;
+}
+
static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
{
return -EINVAL;
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 06086cb93b6f..cdd2ac366bc7 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -8,7 +8,6 @@
#include <linux/backlight.h>
struct platform_pwm_backlight_data {
- int pwm_id;
unsigned int max_brightness;
unsigned int dft_brightness;
unsigned int lth_brightness;
diff --git a/include/linux/range.h b/include/linux/range.h
index 274681cc3154..7efb6a9b069b 100644
--- a/include/linux/range.h
+++ b/include/linux/range.h
@@ -13,6 +13,11 @@ static inline u64 range_len(const struct range *range)
return range->end - range->start + 1;
}
+static inline bool range_contains(struct range *r1, struct range *r2)
+{
+ return r1->start <= r2->start && r1->end >= r2->end;
+}
+
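
For example (values invented), a caller validating that a requested window fits inside an advertised aperture could write:

    static bool ex_window_fits(void)
    {
            struct range aperture = { .start = 0x1000, .end = 0x1ffff };
            struct range request  = { .start = 0x4000, .end = 0x4fff };

            /* true: 0x4000..0x4fff lies entirely within 0x1000..0x1ffff */
            return range_contains(&aperture, &request);
    }
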
int add_range(struct range *range, int az, int nr_range,
u64 start, u64 end);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index a3bc695bcca0..4d10790adeb0 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -38,6 +38,14 @@ struct regmap_field;
struct snd_ac97;
struct sdw_slave;
+/*
+ * regmap_mdio address encoding. IEEE 802.3ae clause 45 addresses consist of a
+ * device address and a register address.
+ */
+#define REGMAP_MDIO_C45_DEVAD_SHIFT 16
+#define REGMAP_MDIO_C45_DEVAD_MASK GENMASK(20, 16)
+#define REGMAP_MDIO_C45_REGNUM_MASK GENMASK(15, 0)
+
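
A hedged sketch of encoding a Clause 45 access with these masks; the MMD and register numbers are arbitrary, and the packed value is then used as the register argument of regmap_read()/regmap_write() on the MDIO regmap:

    #include <linux/bitfield.h>
    #include <linux/regmap.h>

    /* Read MMD 3 (PCS), register 0x20, through an MDIO C45 regmap. */
    static int ex_read_pcs_reg(struct regmap *map, unsigned int *val)
    {
            unsigned int reg = FIELD_PREP(REGMAP_MDIO_C45_DEVAD_MASK, 3) |
                               FIELD_PREP(REGMAP_MDIO_C45_REGNUM_MASK, 0x20);

            return regmap_read(map, reg, val);
    }
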
/* An enum of all the supported cache types */
enum regcache_type {
REGCACHE_NONE,
@@ -512,6 +520,7 @@ typedef void (*regmap_hw_free_context)(void *context);
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of
* struct regmap_config).
+ * @free_on_exit: kfree this on exit of regmap
* @write: Write operation.
* @gather_write: Write operation with split register/value, return -ENOTSUPP
* if not implemented on a given device.
@@ -540,10 +549,10 @@ typedef void (*regmap_hw_free_context)(void *context);
* DEFAULT, BIG is assumed.
* @max_raw_read: Max raw read size that can be used on the bus.
* @max_raw_write: Max raw write size that can be used on the bus.
- * @free_on_exit: kfree this on exit of regmap
*/
struct regmap_bus {
bool fast_io;
+ bool free_on_exit;
regmap_hw_write write;
regmap_hw_gather_write gather_write;
regmap_hw_async_write async_write;
@@ -560,7 +569,6 @@ struct regmap_bus {
enum regmap_endian val_format_endian_default;
size_t max_raw_read;
size_t max_raw_write;
- bool free_on_exit;
};
/*
@@ -1532,9 +1540,6 @@ struct regmap_irq_chip_data;
* @config_base: Base address for IRQ type config regs. If null unsupported.
* @irq_reg_stride: Stride to use for chips where registers are not contiguous.
* @init_ack_masked: Ack all masked interrupts once during initialization.
- * @mask_invert: Inverted mask register: cleared bits are masked out.
- * Deprecated; prefer describing an inverted mask register as
- * an unmask register.
* @mask_unmask_non_inverted: Controls mask bit inversion for chips that set
* both @mask_base and @unmask_base. If false, mask and unmask bits are
* inverted (which is deprecated behavior); if true, bits will not be
@@ -1547,8 +1552,6 @@ struct regmap_irq_chip_data;
* @ack_invert: Inverted ack register: cleared bits for ack.
* @clear_ack: Use this to set 1 and 0 or vice-versa to clear interrupts.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
- * @type_invert: Invert the type flags. Deprecated, use config registers
- * instead.
* @type_in_mask: Use the mask registers for controlling irq type. Use this if
* the hardware provides separate bits for rising/falling edge
* or low/high level interrupts and they should be combined into
@@ -1618,14 +1621,12 @@ struct regmap_irq_chip {
const unsigned int *config_base;
unsigned int irq_reg_stride;
unsigned int init_ack_masked:1;
- unsigned int mask_invert:1;
unsigned int mask_unmask_non_inverted:1;
unsigned int use_ack:1;
unsigned int ack_invert:1;
unsigned int clear_ack:1;
unsigned int wake_invert:1;
unsigned int runtime_pm:1;
- unsigned int type_invert:1;
unsigned int type_in_mask:1;
unsigned int clear_on_unmask:1;
unsigned int not_fixed_stride:1;
diff --git a/include/linux/remoteproc/pruss.h b/include/linux/remoteproc/pruss.h
new file mode 100644
index 000000000000..039b50d58df2
--- /dev/null
+++ b/include/linux/remoteproc/pruss.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PRU-ICSS Subsystem user interfaces
+ *
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - http://www.ti.com
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef __LINUX_PRUSS_H
+#define __LINUX_PRUSS_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define PRU_RPROC_DRVNAME "pru-rproc"
+
+/**
+ * enum pruss_pru_id - PRU core identifiers
+ * @PRUSS_PRU0: PRU Core 0.
+ * @PRUSS_PRU1: PRU Core 1.
+ * @PRUSS_NUM_PRUS: Total number of PRU Cores available.
+ */
+enum pruss_pru_id {
+ PRUSS_PRU0 = 0,
+ PRUSS_PRU1,
+ PRUSS_NUM_PRUS,
+};
+
+/*
+ * enum pru_ctable_idx - Configurable Constant table index identifiers
+ */
+enum pru_ctable_idx {
+ PRU_C24 = 0,
+ PRU_C25,
+ PRU_C26,
+ PRU_C27,
+ PRU_C28,
+ PRU_C29,
+ PRU_C30,
+ PRU_C31,
+};
+
+struct device_node;
+struct rproc;
+
+#if IS_ENABLED(CONFIG_PRU_REMOTEPROC)
+
+struct rproc *pru_rproc_get(struct device_node *np, int index,
+ enum pruss_pru_id *pru_id);
+void pru_rproc_put(struct rproc *rproc);
+int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr);
+
+#else
+
+static inline struct rproc *
+pru_rproc_get(struct device_node *np, int index, enum pruss_pru_id *pru_id)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void pru_rproc_put(struct rproc *rproc) { }
+
+static inline int pru_rproc_set_ctable(struct rproc *rproc,
+ enum pru_ctable_idx c, u32 addr)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_PRU_REMOTEPROC */
+
+static inline bool is_pru_rproc(struct device *dev)
+{
+ const char *drv_name = dev_driver_string(dev);
+
+ if (strncmp(drv_name, PRU_RPROC_DRVNAME, sizeof(PRU_RPROC_DRVNAME)))
+ return false;
+
+ return true;
+}
+
+#endif /* __LINUX_PRUSS_H */
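
A rough sketch of a client claiming a PRU core through this interface; it assumes the client's device tree node carries a PRU phandle list, and the constant-table address is purely illustrative:

    #include <linux/err.h>
    #include <linux/remoteproc/pruss.h>

    static int ex_claim_pru(struct device *dev)
    {
            enum pruss_pru_id pru_id;
            struct rproc *pru;
            int ret;

            /* index 0 of the client's PRU phandle list */
            pru = pru_rproc_get(dev->of_node, 0, &pru_id);
            if (IS_ERR(pru))
                    return PTR_ERR(pru);

            /* point constant C28 at an illustrative shared memory offset */
            ret = pru_rproc_set_ctable(pru, PRU_C28, 0x0100);
            if (ret) {
                    pru_rproc_put(pru);
                    return ret;
            }

            dev_info(dev, "using PRU%d\n", pru_id);
            return 0;
    }
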
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bd3504d11b15..a4570da03e58 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -194,6 +194,8 @@ void page_add_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long address);
+void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+ unsigned long address);
void page_add_file_rmap(struct page *, struct vm_area_struct *,
bool compound);
void page_remove_rmap(struct page *, struct vm_area_struct *,
@@ -201,12 +203,19 @@ void page_remove_rmap(struct page *, struct vm_area_struct *,
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
-void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
+void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
static inline void __page_dup_rmap(struct page *page, bool compound)
{
- atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
+ if (compound) {
+ struct folio *folio = (struct folio *)page;
+
+ VM_BUG_ON_PAGE(compound && !PageHead(page), page);
+ atomic_inc(&folio->_entire_mapcount);
+ } else {
+ atomic_inc(&page->_mapcount);
+ }
}
static inline void page_dup_file_rmap(struct page *page, bool compound)
diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h
index 22fc3a69b683..bfbd48f435fa 100644
--- a/include/linux/rpmsg/qcom_glink.h
+++ b/include/linux/rpmsg/qcom_glink.h
@@ -5,7 +5,7 @@
#include <linux/device.h>
-struct qcom_glink;
+struct qcom_glink_smem;
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK)
void qcom_glink_ssr_notify(const char *ssr_name);
@@ -15,20 +15,20 @@ static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM)
-struct qcom_glink *qcom_glink_smem_register(struct device *parent,
- struct device_node *node);
-void qcom_glink_smem_unregister(struct qcom_glink *glink);
+struct qcom_glink_smem *qcom_glink_smem_register(struct device *parent,
+ struct device_node *node);
+void qcom_glink_smem_unregister(struct qcom_glink_smem *glink);
#else
-static inline struct qcom_glink *
+static inline struct qcom_glink_smem *
qcom_glink_smem_register(struct device *parent,
struct device_node *node)
{
return NULL;
}
-static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {}
+static inline void qcom_glink_smem_unregister(struct qcom_glink_smem *glink) {}
#endif
#endif
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 8270ad7ae14c..0e17ae7fbfd3 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -81,9 +81,13 @@ static inline int get_dumpable(struct mm_struct *mm)
* lifecycle of this mm, just for simplicity.
*/
#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
+
+#define MMF_HAS_MDWE 28
+#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE)
+
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
- MMF_DISABLE_THP_MASK)
+ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK)
#endif /* _LINUX_SCHED_COREDUMP_H */
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 4f765bc788ff..0ce5746a4470 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -804,11 +804,6 @@ struct scmi_device {
#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
-struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol,
- const char *name);
-void scmi_device_destroy(struct scmi_device *scmi_dev);
-
struct scmi_device_id {
u8 protocol_id;
const char *name;
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 3d6fe3ef92cf..bfda927dde15 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -12,6 +12,11 @@
#include <uapi/linux/serial.h>
#include <uapi/linux/serial_reg.h>
+#define UART_IER_ALL_INTR (UART_IER_MSI | \
+ UART_IER_RLSI | \
+ UART_IER_THRI | \
+ UART_IER_RDI)
+
/* Helper for dealing with UART_LCR_WLEN* defines */
#define UART_LCR_WLEN(x) ((x) - 5)
@@ -23,6 +28,11 @@ static inline bool uart_lsr_tx_empty(u16 lsr)
return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
}
+#define UART_MSR_STATUS_BITS (UART_MSR_DCD | \
+ UART_MSR_RI | \
+ UART_MSR_DSR | \
+ UART_MSR_CTS)
+
/*
* Counters of the input lines (CTS, DSR, RI, CD) interrupts
*/
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index fd59f600094a..9e3e5e0d11b2 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -579,6 +579,7 @@ struct uart_port {
struct serial_rs485 rs485;
struct serial_rs485 rs485_supported; /* Supported mask for serial_rs485 */
struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */
+ struct gpio_desc *rs485_rx_during_tx_gpio; /* Output GPIO that sets the state of RS485 RX during TX */
struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
};
@@ -781,7 +782,7 @@ static inline int uart_poll_timeout(struct uart_port *port)
struct earlycon_device {
struct console *con;
struct uart_port port;
- char options[16]; /* e.g., 115200n8 */
+ char options[32]; /* e.g., 115200n8 */
unsigned int baud;
};
@@ -896,10 +897,8 @@ static inline bool uart_softcts_mode(struct uart_port *uport)
* The following are helper functions for the low level drivers.
*/
-extern void uart_handle_dcd_change(struct uart_port *uport,
- unsigned int status);
-extern void uart_handle_cts_change(struct uart_port *uport,
- unsigned int status);
+extern void uart_handle_dcd_change(struct uart_port *uport, bool active);
+extern void uart_handle_cts_change(struct uart_port *uport, bool active);
extern void uart_insert_char(struct uart_port *port, unsigned int status,
unsigned int overrun, unsigned int ch, unsigned int flag);
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index c255273b0281..37ad81058d6a 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -97,7 +97,10 @@ struct intc_hw_desc {
unsigned int nr_subgroups;
};
-#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a)
+#define _INTC_SIZEOF_OR_ZERO(a) (_Generic(a, \
+ typeof(NULL): 0, \
+ default: sizeof(a)))
+#define _INTC_ARRAY(a) a, _INTC_SIZEOF_OR_ZERO(a)/sizeof(*a)
#define INTC_HW_DESC(vectors, groups, mask_regs, \
prio_regs, sense_regs, ack_regs) \
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index d500ea967dc7..103d1000a5a2 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -92,14 +92,8 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
-extern bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
- pgoff_t index, bool shmem_huge_force);
-static inline bool shmem_huge_enabled(struct vm_area_struct *vma,
- bool shmem_huge_force)
-{
- return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff,
- shmem_huge_force);
-}
+extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+ struct mm_struct *mm, unsigned long vm_flags);
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
@@ -115,6 +109,14 @@ enum sgp_type {
int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
enum sgp_type sgp);
+struct folio *shmem_read_folio_gfp(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp);
+
+static inline struct folio *shmem_read_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
+}
static inline struct page *shmem_read_mapping_page(
struct address_space *mapping, pgoff_t index)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4c8492401a10..ff7ad331fb82 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -23,17 +23,11 @@
#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
-#include <linux/net.h>
-#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
-#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
-#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <linux/llist.h>
@@ -261,6 +255,14 @@
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X) \
((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* For X bytes available in skb->head, compute the minimal allocation
+ * needed, given that struct skb_shared_info must itself be aligned.
+ */
+#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
#define SKB_MAX_ORDER(X, ORDER) \
SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
@@ -280,6 +282,7 @@ struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;
+struct ts_config;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
@@ -316,12 +319,16 @@ struct nf_bridge_info {
* and read by ovs to recirc_id.
*/
struct tc_skb_ext {
- __u32 chain;
+ union {
+ u64 act_miss_cookie;
+ __u32 chain;
+ };
__u16 mru;
__u16 zone;
u8 post_ct:1;
u8 post_ct_snat:1;
u8 post_ct_dnat:1;
+ u8 act_miss:1; /* Set if act_miss_cookie is used */
};
#endif
@@ -1240,7 +1247,7 @@ static inline void consume_skb(struct sk_buff *skb)
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
-extern struct kmem_cache *skbuff_head_cache;
+extern struct kmem_cache *skbuff_cache;
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
@@ -1743,6 +1750,13 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
skb->next = NULL;
}
+static inline void skb_poison_list(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+ skb->next = SKB_LIST_POISON_NEXT;
+#endif
+}
+
/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb) \
for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
@@ -2621,13 +2635,24 @@ void *skb_pull_data(struct sk_buff *skb, size_t len);
void *__pskb_pull_tail(struct sk_buff *skb, int delta);
-static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static inline enum skb_drop_reason
+pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
if (likely(len <= skb_headlen(skb)))
- return true;
+ return SKB_NOT_DROPPED_YET;
+
if (unlikely(len > skb->len))
- return false;
- return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
+
+ if (unlikely(!__pskb_pull_tail(skb, len - skb_headlen(skb))))
+ return SKB_DROP_REASON_NOMEM;
+
+ return SKB_NOT_DROPPED_YET;
+}
+
+static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ return pskb_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
}
static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 5834bad8ad78..a61e7d55d0d3 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -72,7 +72,7 @@ struct kmem_cache {
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
-#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_GENERIC
struct kasan_cache kasan_info;
#endif
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index aa0ee1678d29..f6df03f934e5 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -136,7 +136,7 @@ struct kmem_cache {
unsigned int *random_seq;
#endif
-#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_GENERIC
struct kasan_cache kasan_info;
#endif
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
index c9cabb679cd1..fc456f75c131 100644
--- a/include/linux/soc/apple/rtkit.h
+++ b/include/linux/soc/apple/rtkit.h
@@ -22,6 +22,7 @@
* @size: Size of the shared memory buffer.
* @iova: Device VA of shared memory buffer.
* @is_mapped: Shared memory buffer is managed by the co-processor.
+ * @private: Private data pointer for the parent driver.
*/
struct apple_rtkit_shmem {
@@ -30,6 +31,7 @@ struct apple_rtkit_shmem {
size_t size;
dma_addr_t iova;
bool is_mapped;
+ void *private;
};
/*
@@ -78,6 +80,25 @@ struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
const struct apple_rtkit_ops *ops);
/*
+ * Non-devm version of devm_apple_rtkit_init. Must be freed with
+ * apple_rtkit_free.
+ *
+ * @dev: Pointer to the device node this coprocessor is associated with
+ * @cookie: opaque cookie passed to all functions defined in rtkit_ops
+ * @mbox_name: mailbox name used to communicate with the co-processor
+ * @mbox_idx: mailbox index to be used if mbox_name is NULL
+ * @ops: pointer to rtkit_ops to be used for this co-processor
+ */
+struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops);
+
+/*
+ * Free an instance of apple_rtkit.
+ */
+void apple_rtkit_free(struct apple_rtkit *rtk);
+
+/*
* Reinitialize internal structures. Must only be called while the co-processor
* is held in reset.
*/
@@ -105,6 +126,11 @@ int apple_rtkit_wake(struct apple_rtkit *rtk);
int apple_rtkit_shutdown(struct apple_rtkit *rtk);
/*
+ * Put the co-processor into idle mode
+ */
+int apple_rtkit_idle(struct apple_rtkit *rtk);
+
+/*
* Checks if RTKit is running and ready to handle messages.
*/
bool apple_rtkit_is_running(struct apple_rtkit *rtk);
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index 50804ac748bd..07f67b3d8e97 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -140,6 +140,127 @@
#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND BIT(13)
#define MT8192_TOP_AXI_PROT_EN_VDNR_CAM BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_SET 0x2A0
+#define MT8188_TOP_AXI_PROT_EN_CLR 0x2A4
+#define MT8188_TOP_AXI_PROT_EN_STA 0x228
+#define MT8188_TOP_AXI_PROT_EN_1_SET 0x2A8
+#define MT8188_TOP_AXI_PROT_EN_1_CLR 0x2AC
+#define MT8188_TOP_AXI_PROT_EN_1_STA 0x258
+#define MT8188_TOP_AXI_PROT_EN_2_SET 0x714
+#define MT8188_TOP_AXI_PROT_EN_2_CLR 0x718
+#define MT8188_TOP_AXI_PROT_EN_2_STA 0x724
+
+#define MT8188_TOP_AXI_PROT_EN_MM_SET 0x2D4
+#define MT8188_TOP_AXI_PROT_EN_MM_CLR 0x2D8
+#define MT8188_TOP_AXI_PROT_EN_MM_STA 0x2EC
+#define MT8188_TOP_AXI_PROT_EN_MM_2_SET 0xDCC
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CLR 0xDD0
+#define MT8188_TOP_AXI_PROT_EN_MM_2_STA 0xDD8
+
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET 0xB84
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR 0xB88
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA 0xB90
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xBCC
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xBD0
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA 0xBD8
+
+#define MT8188_TOP_AXI_PROT_EN_MFG1_STEP1 BIT(11)
+#define MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP2 BIT(7)
+#define MT8188_TOP_AXI_PROT_EN_1_MFG1_STEP3 BIT(19)
+#define MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP4 BIT(5)
+#define MT8188_TOP_AXI_PROT_EN_MFG1_STEP5 GENMASK(22, 21)
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1_STEP6 BIT(17)
+
+#define MT8188_TOP_AXI_PROT_EN_PEXTP_MAC_P0_STEP1 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_PEXTP_MAC_P0_STEP2 (BIT(8) | BIT(18) | BIT(30))
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_ETHER_STEP1 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_HDMI_TX_STEP1 BIT(20)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP1 GENMASK(31, 29)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP2 (GENMASK(4, 3) | BIT(28))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP1 (GENMASK(16, 14) | BIT(23) | \
+ BIT(27))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP2 (GENMASK(19, 17) | GENMASK(26, 25))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP1 GENMASK(11, 8)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP2 GENMASK(22, 21)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP1 BIT(20)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP2 BIT(12)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP1 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP2 BIT(13)
+
+#define MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP1 BIT(10)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP2 GENMASK(9, 8)
+#define MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP3 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP4 (BIT(1) | BIT(4) | BIT(11))
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0_STEP5 (BIT(20))
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS0_STEP1 (GENMASK(18, 17) | GENMASK(21, 20))
+#define MT8188_TOP_AXI_PROT_EN_VDOSYS0_STEP2 BIT(6)
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0_STEP3 BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP1 GENMASK(31, 30)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP2 BIT(22)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VDOSYS1_STEP3 BIT(10)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_DP_TX_STEP1 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_EDP_TX_STEP1 BIT(22)
+
+#define MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP1 GENMASK(6, 5)
+#define MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP2 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS1_STEP3 BIT(18)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP1 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP2 BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC0_STEP1 BIT(13)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VDEC0_STEP2 BIT(13)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP1 BIT(14)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP2 BIT(29)
+#define MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP1 (BIT(9) | BIT(11))
+#define MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP2 BIT(26)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VENC_STEP3 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP1 (BIT(1) | BIT(3))
+#define MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP2 BIT(25)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_VCORE_STEP3 BIT(16)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP1 GENMASK(27, 26)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP2 GENMASK(25, 24)
+#define MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP1 (BIT(2) | BIT(4))
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_VCORE_STEP2 BIT(0)
+#define MT8188_TOP_AXI_PROT_EN_1_CAM_VCORE_STEP3 BIT(22)
+#define MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP4 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_VCORE_STEP5 BIT(17)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP1 GENMASK(31, 30)
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP2 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP3 GENMASK(29, 28)
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP4 BIT(1)
+
+#define MT8188_SMI_COMMON_CLAMP_EN_STA 0x3C0
+#define MT8188_SMI_COMMON_CLAMP_EN_SET 0x3C4
+#define MT8188_SMI_COMMON_CLAMP_EN_CLR 0x3C8
+
+#define MT8188_SMI_COMMON_SMI_CLAMP_DIP_TO_VDO0 GENMASK(3, 1)
+#define MT8188_SMI_COMMON_SMI_CLAMP_DIP_TO_VPP1 GENMASK(2, 1)
+#define MT8188_SMI_COMMON_SMI_CLAMP_IPE_TO_VPP1 BIT(0)
+
+#define MT8188_SMI_COMMON_SMI_CLAMP_CAM_SUBA_TO_VPP0 GENMASK(3, 2)
+#define MT8188_SMI_COMMON_SMI_CLAMP_CAM_SUBB_TO_VDO0 GENMASK(3, 2)
+
+#define MT8188_SMI_LARB10_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11A_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11C_RESET_ADDR 0xC
+#define MT8188_SMI_LARB12_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11B_RESET_ADDR 0xC
+#define MT8188_SMI_LARB15_RESET_ADDR 0xC
+#define MT8188_SMI_LARB16B_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB17B_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB16A_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB17A_RESET_ADDR 0xA0
+
+#define MT8188_SMI_LARB10_RESET BIT(0)
+#define MT8188_SMI_LARB11A_RESET BIT(0)
+#define MT8188_SMI_LARB11C_RESET BIT(0)
+#define MT8188_SMI_LARB12_RESET BIT(8)
+#define MT8188_SMI_LARB11B_RESET BIT(0)
+#define MT8188_SMI_LARB15_RESET BIT(0)
+#define MT8188_SMI_LARB16B_RESET BIT(4)
+#define MT8188_SMI_LARB17B_RESET BIT(4)
+#define MT8188_SMI_LARB16A_RESET BIT(4)
+#define MT8188_SMI_LARB17A_RESET BIT(4)
+
#define MT8186_TOP_AXI_PROT_EN_SET (0x2A0)
#define MT8186_TOP_AXI_PROT_EN_CLR (0x2A4)
#define MT8186_TOP_AXI_PROT_EN_STA (0x228)
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
index b85f66db33e1..dc2963a0a0f7 100644
--- a/include/linux/soc/mediatek/mtk-mmsys.h
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -6,6 +6,10 @@
#ifndef __MTK_MMSYS_H
#define __MTK_MMSYS_H
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/mtk-cmdq-mailbox.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
enum mtk_ddp_comp_id;
struct device;
@@ -36,7 +40,16 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_DSI1,
DDP_COMPONENT_DSI2,
DDP_COMPONENT_DSI3,
+ DDP_COMPONENT_ETHDR_MIXER,
DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_MDP_RDMA0,
+ DDP_COMPONENT_MDP_RDMA1,
+ DDP_COMPONENT_MDP_RDMA2,
+ DDP_COMPONENT_MDP_RDMA3,
+ DDP_COMPONENT_MDP_RDMA4,
+ DDP_COMPONENT_MDP_RDMA5,
+ DDP_COMPONENT_MDP_RDMA6,
+ DDP_COMPONENT_MDP_RDMA7,
DDP_COMPONENT_MERGE0,
DDP_COMPONENT_MERGE1,
DDP_COMPONENT_MERGE2,
@@ -74,4 +87,16 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val);
+void mtk_mmsys_merge_async_config(struct device *dev, int idx, int width,
+ int height, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_hdr_config(struct device *dev, int be_width, int be_height,
+ struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16 alpha,
+ u8 mode, u32 biwidth, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_mixer_in_channel_swap(struct device *dev, int idx, bool channel_swap,
+ struct cmdq_pkt *cmdq_pkt);
+
#endif /* __MTK_MMSYS_H */
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index a0746d4aec20..fd0b0605cf90 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -103,7 +103,6 @@ struct mtk_wed_device {
struct {
int size;
- struct page_frag_cache rx_page;
struct mtk_rxbm_desc *desc;
dma_addr_t desc_phys;
} rx_buf_ring;
@@ -151,6 +150,8 @@ struct mtk_wed_device {
void (*release_rx_buf)(struct mtk_wed_device *wed);
void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
struct mtk_wed_wo_rx_stats *stats);
+ int (*reset)(struct mtk_wed_device *wed);
+ void (*reset_complete)(struct mtk_wed_device *wed);
} wlan;
#endif
};
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index 23c5b30f3511..be98aebcb3e1 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -153,7 +153,7 @@ typedef struct apr_device gpr_device_t;
struct apr_driver {
int (*probe)(struct apr_device *sl);
- int (*remove)(struct apr_device *sl);
+ void (*remove)(struct apr_device *sl);
int (*callback)(struct apr_device *a,
struct apr_resp_pkt *d);
int (*gpr_callback)(struct gpr_resp_pkt *d, void *data, int op);
diff --git a/include/linux/qcom-geni-se.h b/include/linux/soc/qcom/geni-se.h
index f5672785c0c4..400213daa461 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/soc/qcom/geni-se.h
@@ -103,6 +103,7 @@ struct geni_se {
#define SE_DMA_TX_FSM_RST 0xc58
#define SE_DMA_RX_IRQ_STAT 0xd40
#define SE_DMA_RX_IRQ_CLR 0xd44
+#define SE_DMA_RX_LEN_IN 0xd54
#define SE_DMA_RX_FSM_RST 0xd58
#define SE_HW_PARAM_0 0xe24
#define SE_HW_PARAM_1 0xe28
@@ -235,6 +236,8 @@ struct geni_se {
#define RX_SBE BIT(2)
#define RX_RESET_DONE BIT(3)
#define RX_FLUSH_DONE BIT(4)
+#define RX_DMA_PARITY_ERR BIT(5)
+#define RX_DMA_BREAK GENMASK(8, 7)
#define RX_GENI_GP_IRQ GENMASK(10, 5)
#define RX_GENI_CANCEL_IRQ BIT(11)
#define RX_GENI_GP_IRQ_EXT GENMASK(13, 12)
diff --git a/include/linux/soc/qcom/pmic_glink.h b/include/linux/soc/qcom/pmic_glink.h
new file mode 100644
index 000000000000..fd124aa18c81
--- /dev/null
+++ b/include/linux/soc/qcom/pmic_glink.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Linaro Ltd
+ */
+#ifndef __SOC_QCOM_PMIC_GLINK_H__
+#define __SOC_QCOM_PMIC_GLINK_H__
+
+struct pmic_glink;
+struct pmic_glink_client;
+
+#define PMIC_GLINK_OWNER_BATTMGR 32778
+#define PMIC_GLINK_OWNER_USBC 32779
+#define PMIC_GLINK_OWNER_USBC_PAN 32780
+
+#define PMIC_GLINK_REQ_RESP 1
+#define PMIC_GLINK_NOTIFY 2
+
+struct pmic_glink_hdr {
+ __le32 owner;
+ __le32 type;
+ __le32 opcode;
+};
+
+int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len);
+
+struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+ unsigned int id,
+ void (*cb)(const void *, size_t, void *),
+ void (*pdr)(void *, int),
+ void *priv);
+
+#endif
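
A hedged sketch of a client sitting on top of this interface; the callback bodies are placeholders and the owner id choice is only an example:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/soc/qcom/pmic_glink.h>

    static void ex_pg_callback(const void *data, size_t len, void *priv)
    {
            const struct pmic_glink_hdr *hdr = data;

            if (len < sizeof(*hdr))
                    return;
            /* dispatch on le32_to_cpu(hdr->opcode) here */
    }

    static void ex_pg_pdr_notify(void *priv, int state)
    {
            /* react to protection-domain restart notifications */
    }

    static int ex_pg_init(struct device *dev)
    {
            struct pmic_glink_client *client;

            client = devm_pmic_glink_register_client(dev, PMIC_GLINK_OWNER_BATTMGR,
                                                      ex_pg_callback, ex_pg_pdr_notify,
                                                      NULL);
            return PTR_ERR_OR_ZERO(client);
    }
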
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 62de54992e49..2990f425fdef 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -43,7 +43,6 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_HWKM_CLK 0x6d6b7768
#define QCOM_SMD_RPM_PKA_CLK 0x616b70
#define QCOM_SMD_RPM_MCFG_CLK 0x6766636d
-#define QCOM_SMD_RPM_MMXI_CLK 0x69786d6d
int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
int state,
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 9e4537f409c2..ef645de13ae9 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -4,6 +4,7 @@
#ifndef __SOUNDWIRE_H
#define __SOUNDWIRE_H
+#include <linux/bug.h>
#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
@@ -365,7 +366,9 @@ struct sdw_dpn_prop {
* @sink_dpn_prop: Sink Data Port N properties
* @scp_int1_mask: SCP_INT1_MASK desired settings
* @quirks: bitmask identifying deltas from the MIPI specification
- * @is_sdca: the Slave supports the SDCA specification
+ * @clock_reg_supported: the Peripheral implements the clock base and scale
+ * registers introduced with the SoundWire 1.2 specification. SDCA devices
+ * do not need to set this boolean property as the registers are required.
*/
struct sdw_slave_prop {
u32 mipi_revision;
@@ -389,7 +392,7 @@ struct sdw_slave_prop {
struct sdw_dpn_prop *sink_dpn_prop;
u8 scp_int1_mask;
u32 quirks;
- bool is_sdca;
+ bool clock_reg_supported;
};
#define SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY BIT(0)
@@ -566,13 +569,15 @@ struct sdw_prepare_ch {
* enum sdw_port_prep_ops: Prepare operations for Data Port
*
* @SDW_OPS_PORT_PRE_PREP: Pre prepare operation for the Port
- * @SDW_OPS_PORT_PREP: Prepare operation for the Port
+ * @SDW_OPS_PORT_PRE_DEPREP: Pre deprepare operation for the Port
* @SDW_OPS_PORT_POST_PREP: Post prepare operation for the Port
+ * @SDW_OPS_PORT_POST_DEPREP: Post deprepare operation for the Port
*/
enum sdw_port_prep_ops {
SDW_OPS_PORT_PRE_PREP = 0,
- SDW_OPS_PORT_PREP = 1,
- SDW_OPS_PORT_POST_PREP = 2,
+ SDW_OPS_PORT_PRE_DEPREP,
+ SDW_OPS_PORT_POST_PREP,
+ SDW_OPS_PORT_POST_DEPREP,
};
/**
@@ -834,8 +839,8 @@ struct sdw_defer {
* @read_prop: Read Master properties
* @override_adr: Override value read from firmware (quirk for buggy firmware)
* @xfer_msg: Transfer message callback
- * @xfer_msg_defer: Defer version of transfer message callback
- * @reset_page_addr: Reset the SCP page address registers
+ * @xfer_msg_defer: Deferred version of the transfer message callback. The
+ * message is handled using the bus struct sdw_defer
* @set_bus_conf: Set the bus configuration
* @pre_bank_switch: Callback for pre bank switch
* @post_bank_switch: Callback for post bank switch
@@ -849,10 +854,7 @@ struct sdw_master_ops {
enum sdw_command_response (*xfer_msg)
(struct sdw_bus *bus, struct sdw_msg *msg);
enum sdw_command_response (*xfer_msg_defer)
- (struct sdw_bus *bus, struct sdw_msg *msg,
- struct sdw_defer *defer);
- enum sdw_command_response (*reset_page_addr)
- (struct sdw_bus *bus, unsigned int dev_num);
+ (struct sdw_bus *bus);
int (*set_bus_conf)(struct sdw_bus *bus,
struct sdw_bus_params *params);
int (*pre_bank_switch)(struct sdw_bus *bus);
@@ -1021,15 +1023,8 @@ int sdw_stream_add_master(struct sdw_bus *bus,
struct sdw_port_config *port_config,
unsigned int num_ports,
struct sdw_stream_runtime *stream);
-int sdw_stream_add_slave(struct sdw_slave *slave,
- struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
- unsigned int num_ports,
- struct sdw_stream_runtime *stream);
int sdw_stream_remove_master(struct sdw_bus *bus,
struct sdw_stream_runtime *stream);
-int sdw_stream_remove_slave(struct sdw_slave *slave,
- struct sdw_stream_runtime *stream);
int sdw_startup_stream(void *sdw_substream);
int sdw_prepare_stream(struct sdw_stream_runtime *stream);
int sdw_enable_stream(struct sdw_stream_runtime *stream);
@@ -1040,18 +1035,111 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus);
int sdw_bus_clk_stop(struct sdw_bus *bus);
int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
-/* messaging and data APIs */
+int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
+void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
+#if IS_ENABLED(CONFIG_SOUNDWIRE)
+
+int sdw_stream_add_slave(struct sdw_slave *slave,
+ struct sdw_stream_config *stream_config,
+ struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream);
+int sdw_stream_remove_slave(struct sdw_slave *slave,
+ struct sdw_stream_runtime *stream);
+
+/* messaging and data APIs */
int sdw_read(struct sdw_slave *slave, u32 addr);
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value);
int sdw_read_no_pm(struct sdw_slave *slave, u32 addr);
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val);
+int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val);
int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
-int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
-void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
+#else
+
+static inline int sdw_stream_add_slave(struct sdw_slave *slave,
+ struct sdw_stream_config *stream_config,
+ struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_stream_remove_slave(struct sdw_slave *slave,
+ struct sdw_stream_runtime *stream)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+/* messaging and data APIs */
+static inline int sdw_read(struct sdw_slave *slave, u32 addr)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+#endif /* CONFIG_SOUNDWIRE */
#endif /* __SOUNDWIRE_H */
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
index 52eb66cd11bc..d8c27f1e5559 100644
--- a/include/linux/soundwire/sdw_type.h
+++ b/include/linux/soundwire/sdw_type.h
@@ -21,7 +21,7 @@ static inline int is_sdw_slave(const struct device *dev)
int __sdw_register_driver(struct sdw_driver *drv, struct module *owner);
void sdw_unregister_driver(struct sdw_driver *drv);
-int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env);
+int sdw_slave_uevent(const struct device *dev, struct kobj_uevent_env *env);
/**
* module_sdw_driver() - Helper macro for registering a Soundwire driver
diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h
index 2e2a622e56da..3b74c3750caf 100644
--- a/include/linux/spi/altera.h
+++ b/include/linux/spi/altera.h
@@ -14,7 +14,7 @@
/**
* struct altera_spi_platform_data - Platform data of the Altera SPI driver
- * @mode_bits: Mode bits of SPI master.
+ * @mode_bits: Mode bits of SPI host.
* @num_chipselect: Number of chipselects.
* @bits_per_word_mask: bitmask of supported bits_per_word for transfers.
* @num_devices: Number of devices that shall be added when the driver
@@ -46,5 +46,5 @@ struct altera_spi {
};
extern irqreturn_t altera_spi_irq(int irq, void *dev);
-extern void altera_spi_init_master(struct spi_master *master);
+extern void altera_spi_init_host(struct spi_controller *host);
#endif /* __LINUX_SPI_ALTERA_H */
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
deleted file mode 100644
index d278576ab692..000000000000
--- a/include/linux/spi/at86rf230.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AT86RF230/RF231 driver
- *
- * Copyright (C) 2009-2012 Siemens AG
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
- */
-#ifndef AT86RF230_H
-#define AT86RF230_H
-
-struct at86rf230_platform_data {
- int rstn;
- int slp_tr;
- int dig2;
- u8 xtal_trim;
-};
-
-#endif
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
deleted file mode 100644
index 449bacf10700..000000000000
--- a/include/linux/spi/cc2520.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Header file for cc2520 radio driver
- *
- * Copyright (C) 2014 Varka Bhadram <varkab@cdac.in>
- * Md.Jamal Mohiuddin <mjmohiuddin@cdac.in>
- * P Sowjanya <sowjanyap@cdac.in>
- */
-
-#ifndef __CC2520_H
-#define __CC2520_H
-
-struct cc2520_platform_data {
- int fifo;
- int fifop;
- int cca;
- int sfd;
- int reset;
- int vreg;
-};
-
-#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 9a32495fbb1f..4fa26b9a3572 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -26,6 +26,7 @@ struct spi_controller;
struct spi_transfer;
struct spi_controller_mem_ops;
struct spi_controller_mem_caps;
+struct spi_message;
/*
* INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
@@ -119,6 +120,8 @@ struct spi_delay {
extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer);
extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
+extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
+ struct spi_transfer *xfer);
/**
* struct spi_device - Controller side proxy for an SPI slave device
@@ -223,7 +226,7 @@ struct spi_device {
static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
"SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
-static inline struct spi_device *to_spi_device(struct device *dev)
+static inline struct spi_device *to_spi_device(const struct device *dev)
{
return dev ? container_of(dev, struct spi_device, dev) : NULL;
}
@@ -263,7 +266,25 @@ static inline void *spi_get_drvdata(struct spi_device *spi)
return dev_get_drvdata(&spi->dev);
}
-struct spi_message;
+static inline u8 spi_get_chipselect(struct spi_device *spi, u8 idx)
+{
+ return spi->chip_select;
+}
+
+static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect)
+{
+ spi->chip_select = chipselect;
+}
+
+static inline struct gpio_desc *spi_get_csgpiod(struct spi_device *spi, u8 idx)
+{
+ return spi->cs_gpiod;
+}
+
+static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod)
+{
+ spi->cs_gpiod = csgpiod;
+}
/**
* struct spi_driver - Host side "protocol" driver
@@ -1001,6 +1022,9 @@ struct spi_transfer {
void *rx_buf;
unsigned len;
+#define SPI_TRANS_FAIL_NO_START BIT(0)
+ u16 error;
+
dma_addr_t tx_dma;
dma_addr_t rx_dma;
struct sg_table tx_sg;
@@ -1011,6 +1035,7 @@ struct spi_transfer {
unsigned cs_change:1;
unsigned tx_nbits:3;
unsigned rx_nbits:3;
+ unsigned timestamped:1;
#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
@@ -1027,12 +1052,7 @@ struct spi_transfer {
struct ptp_system_timestamp *ptp_sts;
- bool timestamped;
-
struct list_head transfer_list;
-
-#define SPI_TRANS_FAIL_NO_START BIT(0)
- u16 error;
};
/**
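A minimal usage sketch for the new chip-select accessors added above (not part of this patch): a controller driver drives the chip select through spi_get_csgpiod()/spi_get_chipselect() instead of touching spi->cs_gpiod or spi->chip_select directly. my_set_cs() is a hypothetical helper.

#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>

/* Assert or release the chip select of @spi; a GPIO CS takes precedence. */
static void my_set_cs(struct spi_device *spi, bool assert)
{
	struct gpio_desc *cs = spi_get_csgpiod(spi, 0);

	if (cs)
		gpiod_set_value_cansleep(cs, assert);
	else
		dev_dbg(&spi->dev, "native CS %u handled by the controller\n",
			spi_get_chipselect(spi, 0));
}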
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
index c15d69d28e68..3934ce789d87 100644
--- a/include/linux/spi/xilinx_spi.h
+++ b/include/linux/spi/xilinx_spi.h
@@ -15,6 +15,7 @@ struct xspi_platform_data {
u8 bits_per_word;
struct spi_board_info *devices;
u8 num_devices;
+ bool force_irq;
};
#endif /* __LINUX_SPI_XILINX_SPI_H */
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index f9b53acb4e02..1f326da289d3 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -285,7 +285,7 @@ struct ssb_device {
/* Go from struct device to struct ssb_device. */
static inline
-struct ssb_device * dev_to_ssb_dev(struct device *dev)
+struct ssb_device * dev_to_ssb_dev(const struct device *dev)
{
struct __ssb_dev_wrapper *wrap;
wrap = container_of(dev, struct __ssb_dev_wrapper, dev);
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 9ca7798d7a31..e58306783d8e 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -1,11 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * A generic stack depot implementation
+ * Stack depot - a stack trace storage that avoids duplication.
+ *
+ * Stack depot is intended to be used by subsystems that need to store and
+ * later retrieve many potentially duplicated stack traces without wasting
+ * memory.
+ *
+ * For example, KASAN needs to save allocation and free stack traces for each
+ * object. Storing two stack traces per object requires a lot of memory (e.g.
+ * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
+ * stack traces often repeat, using stack depot reduces that cost by about 100x.
+ *
+ * Stack traces are never removed from the stack depot.
*
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
- * Based on code by Dmitry Chernenkov.
+ * Based on the code by Dmitry Chernenkov.
*/
#ifndef _LINUX_STACKDEPOT_H
@@ -14,62 +25,143 @@
#include <linux/gfp.h>
typedef u32 depot_stack_handle_t;
+
/*
* Number of bits in the handle that stack depot doesn't use. Users may store
- * information in them.
+ * information in them via stack_depot_set/get_extra_bits.
*/
#define STACK_DEPOT_EXTRA_BITS 5
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- unsigned int extra_bits,
- gfp_t gfp_flags, bool can_alloc);
-
/*
- * Every user of stack depot has to call stack_depot_init() during its own init
- * when it's decided that it will be calling stack_depot_save() later. This is
- * recommended for e.g. modules initialized later in the boot process, when
- * slab_is_available() is true.
- *
- * The alternative is to select STACKDEPOT_ALWAYS_INIT to have stack depot
- * enabled as part of mm_init(), for subsystems where it's known at compile time
- * that stack depot will be used.
- *
- * Another alternative is to call stack_depot_want_early_init(), when the
- * decision to use stack depot is taken e.g. when evaluating kernel boot
- * parameters, which precedes the enablement point in mm_init().
- *
- * stack_depot_init() and stack_depot_want_early_init() can be called regardless
- * of CONFIG_STACKDEPOT and are no-op when disabled. The actual save/fetch/print
- * functions should only be called from code that makes sure CONFIG_STACKDEPOT
- * is enabled.
+ * Using stack depot requires its initialization, which can be done in 3 ways:
+ *
+ * 1. Selecting CONFIG_STACKDEPOT_ALWAYS_INIT. This option is suitable in
+ * scenarios where it's known at compile time that stack depot will be used.
+ * Enabling this config makes the kernel initialize stack depot in mm_init().
+ *
+ * 2. Calling stack_depot_request_early_init() during early boot, before
+ * stack_depot_early_init() in mm_init() completes. For example, this can
+ * be done when evaluating kernel boot parameters.
+ *
+ * 3. Calling stack_depot_init(). Possible after boot is complete. This option
+ * is recommended for modules initialized later in the boot process, after
+ * mm_init() completes.
+ *
+ * stack_depot_init() and stack_depot_request_early_init() can be called
+ * regardless of whether CONFIG_STACKDEPOT is enabled and are no-op when this
+ * config is disabled. The save/fetch/print stack depot functions can only be
+ * called from the code that makes sure CONFIG_STACKDEPOT is enabled _and_
+ * initializes stack depot via one of the ways listed above.
*/
#ifdef CONFIG_STACKDEPOT
int stack_depot_init(void);
-void __init stack_depot_want_early_init(void);
+void __init stack_depot_request_early_init(void);
-/* This is supposed to be called only from mm_init() */
+/* Must only be called from mm_init(). */
int __init stack_depot_early_init(void);
#else
static inline int stack_depot_init(void) { return 0; }
-static inline void stack_depot_want_early_init(void) { }
+static inline void stack_depot_request_early_init(void) { }
static inline int stack_depot_early_init(void) { return 0; }
#endif
+/**
+ * __stack_depot_save - Save a stack trace to stack depot
+ *
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @gfp_flags: Allocation GFP flags
+ * @can_alloc: Allocate stack pools (increased chance of failure if false)
+ *
+ * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
+ * %true, stack depot can replenish the stack pools in case no space is left
+ * (allocates using GFP flags of @gfp_flags). If @can_alloc is %false, avoids
+ * any allocations and fails if no space is left to store the stack trace.
+ *
+ * If the provided stack trace comes from the interrupt context, only the part
+ * up to the interrupt entry is saved.
+ *
+ * Context: Any context, but setting @can_alloc to %false is required if
+ * alloc_pages() cannot be used from the current context. Currently
+ * this is the case for contexts where neither %GFP_ATOMIC nor
+ * %GFP_NOWAIT can be used (NMI, raw_spin_lock).
+ *
+ * Return: Handle of the stack trace stored in depot, 0 on failure
+ */
+depot_stack_handle_t __stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t gfp_flags, bool can_alloc);
+
+/**
+ * stack_depot_save - Save a stack trace to stack depot
+ *
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @gfp_flags: Allocation GFP flags
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed.
+ * See __stack_depot_save() for more details.
+ *
+ * Return: Handle of the stack trace stored in depot, 0 on failure
+ */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries, gfp_t gfp_flags);
+/**
+ * stack_depot_fetch - Fetch a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @entries: Pointer to store the address of the stack trace
+ *
+ * Return: Number of frames for the fetched stack
+ */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
-unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
+/**
+ * stack_depot_print - Print a stack trace from stack depot
+ *
+ * @stack: Stack depot handle returned from stack_depot_save()
+ */
+void stack_depot_print(depot_stack_handle_t stack);
+/**
+ * stack_depot_snprint - Print a stack trace from stack depot into a buffer
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @buf: Pointer to the print buffer
+ * @size: Size of the print buffer
+ * @spaces: Number of leading spaces to print
+ *
+ * Return: Number of bytes printed
+ */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);
-void stack_depot_print(depot_stack_handle_t stack);
+/**
+ * stack_depot_set_extra_bits - Set extra bits in a stack depot handle
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @extra_bits: Value to set the extra bits
+ *
+ * Return: Stack depot handle with extra bits set
+ *
+ * Stack depot handles have a few unused bits, which can be used for storing
+ * user-specific information. These bits are transparent to the stack depot.
+ */
+depot_stack_handle_t __must_check stack_depot_set_extra_bits(
+ depot_stack_handle_t handle, unsigned int extra_bits);
+
+/**
+ * stack_depot_get_extra_bits - Retrieve extra bits from a stack depot handle
+ *
+ * @handle: Stack depot handle with extra bits saved
+ *
+ * Return: Extra bits retrieved from the stack depot handle
+ */
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
#endif
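A minimal sketch of the workflow documented above (illustrative only): a module initializes the depot via option 3 from its init path, then records and later prints deduplicated stack traces. record_event() and report_event() are hypothetical names.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Capture the current stack and store it (deduplicated) in the depot. */
static depot_stack_handle_t record_event(void)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr, GFP_KERNEL);
}

/* Later, e.g. on an error path, print the saved trace. */
static void report_event(depot_stack_handle_t handle)
{
	if (handle)
		stack_depot_print(handle);
}

The sketch assumes stack_depot_init() was called earlier, e.g. from the module's init function.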
diff --git a/include/linux/string.h b/include/linux/string.h
index db28802ab0a6..c062c581a98b 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -177,6 +177,7 @@ extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
+extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 8530c7328269..fae6beaaa217 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -11,6 +11,11 @@ struct device;
struct file;
struct task_struct;
+static inline bool string_is_terminated(const char *s, int len)
+{
+ return memchr(s, '\0', len) ? true : false;
+}
+
/* Descriptions of the types of units to
* print in */
enum string_size_units {
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 91f43d86879d..78a80bf3fdcb 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -1,6 +1,4 @@
/*
- * linux/include/linux/sunrpc/gss_krb5_types.h
- *
* Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
* lib/gssapi/krb5/gssapiP_krb5.h, and others
*
@@ -36,6 +34,9 @@
*
*/
+#ifndef _LINUX_SUNRPC_GSS_KRB5_H
+#define _LINUX_SUNRPC_GSS_KRB5_H
+
#include <crypto/skcipher.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
@@ -44,80 +45,15 @@
/* Length of constant used in key derivation */
#define GSS_KRB5_K5CLENGTH (5)
-/* Maximum key length (in bytes) for the supported crypto algorithms*/
+/* Maximum key length (in bytes) for the supported crypto algorithms */
#define GSS_KRB5_MAX_KEYLEN (32)
-/* Maximum checksum function output for the supported crypto algorithms */
-#define GSS_KRB5_MAX_CKSUM_LEN (20)
+/* Maximum checksum function output for the supported enctypes */
+#define GSS_KRB5_MAX_CKSUM_LEN (24)
/* Maximum blocksize for the supported crypto algorithms */
#define GSS_KRB5_MAX_BLOCKSIZE (16)
-struct krb5_ctx;
-
-struct gss_krb5_enctype {
- const u32 etype; /* encryption (key) type */
- const u32 ctype; /* checksum type */
- const char *name; /* "friendly" name */
- const char *encrypt_name; /* crypto encrypt name */
- const char *cksum_name; /* crypto checksum name */
- const u16 signalg; /* signing algorithm */
- const u16 sealalg; /* sealing algorithm */
- const u32 blocksize; /* encryption blocksize */
- const u32 conflen; /* confounder length
- (normally the same as
- the blocksize) */
- const u32 cksumlength; /* checksum length */
- const u32 keyed_cksum; /* is it a keyed cksum? */
- const u32 keybytes; /* raw key len, in bytes */
- const u32 keylength; /* final key len, in bytes */
- u32 (*encrypt) (struct crypto_sync_skcipher *tfm,
- void *iv, void *in, void *out,
- int length); /* encryption function */
- u32 (*decrypt) (struct crypto_sync_skcipher *tfm,
- void *iv, void *in, void *out,
- int length); /* decryption function */
- u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
- struct xdr_netobj *in,
- struct xdr_netobj *out); /* complete key generation */
- u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
- struct xdr_buf *buf,
- struct page **pages); /* v2 encryption function */
- u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
- struct xdr_buf *buf, u32 *headskip,
- u32 *tailskip); /* v2 decryption function */
-};
-
-/* krb5_ctx flags definitions */
-#define KRB5_CTX_FLAG_INITIATOR 0x00000001
-#define KRB5_CTX_FLAG_CFX 0x00000002
-#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
-
-struct krb5_ctx {
- int initiate; /* 1 = initiating, 0 = accepting */
- u32 enctype;
- u32 flags;
- const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
- struct crypto_sync_skcipher *enc;
- struct crypto_sync_skcipher *seq;
- struct crypto_sync_skcipher *acceptor_enc;
- struct crypto_sync_skcipher *initiator_enc;
- struct crypto_sync_skcipher *acceptor_enc_aux;
- struct crypto_sync_skcipher *initiator_enc_aux;
- u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
- u8 cksum[GSS_KRB5_MAX_KEYLEN];
- atomic_t seq_send;
- atomic64_t seq_send64;
- time64_t endtime;
- struct xdr_netobj mech_used;
- u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
- u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
- u8 initiator_seal[GSS_KRB5_MAX_KEYLEN];
- u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN];
- u8 initiator_integ[GSS_KRB5_MAX_KEYLEN];
- u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
-};
-
/* The length of the Kerberos GSS token header */
#define GSS_KRB5_TOK_HDR_LEN (16)
@@ -150,6 +86,12 @@ enum seal_alg {
SEAL_ALG_DES3KD = 0x0002
};
+/*
+ * These values are assigned by IANA and published via the
+ * subregistry at the link below:
+ *
+ * https://www.iana.org/assignments/kerberos-parameters/kerberos-parameters.xhtml#kerberos-parameters-2
+ */
#define CKSUMTYPE_CRC32 0x0001
#define CKSUMTYPE_RSA_MD4 0x0002
#define CKSUMTYPE_RSA_MD4_DES 0x0003
@@ -160,6 +102,10 @@ enum seal_alg {
#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c
#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
+#define CKSUMTYPE_CMAC_CAMELLIA128 0x0011
+#define CKSUMTYPE_CMAC_CAMELLIA256 0x0012
+#define CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013
+#define CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014
#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
/* from gssapi_err_krb5.h */
@@ -180,6 +126,11 @@ enum seal_alg {
/* per Kerberos v5 protocol spec crypto types from the wire.
* these get mapped to linux kernel crypto routines.
+ *
+ * These values are assigned by IANA and published via the
+ * subregistry at the link below:
+ *
+ * https://www.iana.org/assignments/kerberos-parameters/kerberos-parameters.xhtml#kerberos-parameters-1
*/
#define ENCTYPE_NULL 0x0000
#define ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */
@@ -193,8 +144,12 @@ enum seal_alg {
#define ENCTYPE_DES3_CBC_SHA1 0x0010
#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
+#define ENCTYPE_AES128_CTS_HMAC_SHA256_128 0x0013
+#define ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014
#define ENCTYPE_ARCFOUR_HMAC 0x0017
#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
+#define ENCTYPE_CAMELLIA128_CTS_CMAC 0x0019
+#define ENCTYPE_CAMELLIA256_CTS_CMAC 0x001A
#define ENCTYPE_UNKNOWN 0x01ff
/*
@@ -216,103 +171,4 @@ enum seal_alg {
#define KG_USAGE_INITIATOR_SEAL (24)
#define KG_USAGE_INITIATOR_SIGN (25)
-/*
- * This compile-time check verifies that we will not exceed the
- * slack space allotted by the client and server auth_gss code
- * before they call gss_wrap().
- */
-#define GSS_KRB5_MAX_SLACK_NEEDED \
- (GSS_KRB5_TOK_HDR_LEN /* gss token header */ \
- + GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \
- + GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \
- + GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \
- + GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\
- + GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \
- + 4 + 4 /* RPC verifier */ \
- + GSS_KRB5_TOK_HDR_LEN \
- + GSS_KRB5_MAX_CKSUM_LEN)
-
-u32
-make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
- struct xdr_buf *body, int body_offset, u8 *cksumkey,
- unsigned int usage, struct xdr_netobj *cksumout);
-
-u32
-make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen,
- struct xdr_buf *body, int body_offset, u8 *key,
- unsigned int usage, struct xdr_netobj *cksum);
-
-u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
- struct xdr_netobj *);
-
-u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
- struct xdr_netobj *);
-
-u32
-gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
- struct xdr_buf *outbuf, struct page **pages);
-
-u32
-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
- struct xdr_buf *buf);
-
-
-u32
-krb5_encrypt(struct crypto_sync_skcipher *key,
- void *iv, void *in, void *out, int length);
-
-u32
-krb5_decrypt(struct crypto_sync_skcipher *key,
- void *iv, void *in, void *out, int length);
-
-int
-gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf,
- int offset, struct page **pages);
-
-int
-gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf,
- int offset);
-
-s32
-krb5_make_seq_num(struct krb5_ctx *kctx,
- struct crypto_sync_skcipher *key,
- int direction,
- u32 seqnum, unsigned char *cksum, unsigned char *buf);
-
-s32
-krb5_get_seq_num(struct krb5_ctx *kctx,
- unsigned char *cksum,
- unsigned char *buf, int *direction, u32 *seqnum);
-
-int
-xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen);
-
-u32
-krb5_derive_key(const struct gss_krb5_enctype *gk5e,
- const struct xdr_netobj *inkey,
- struct xdr_netobj *outkey,
- const struct xdr_netobj *in_constant,
- gfp_t gfp_mask);
-
-u32
-gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
- struct xdr_netobj *randombits,
- struct xdr_netobj *key);
-
-u32
-gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
- struct xdr_netobj *randombits,
- struct xdr_netobj *key);
-
-u32
-gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
- struct xdr_buf *buf,
- struct page **pages);
-
-u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
- struct xdr_buf *buf, u32 *plainoffset,
- u32 *plainlen);
-
-void
-gss_krb5_make_confounder(char *p, u32 conflen);
+#endif /* _LINUX_SUNRPC_GSS_KRB5_H */
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
deleted file mode 100644
index 87eea679d750..000000000000
--- a/include/linux/sunrpc/gss_krb5_enctypes.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Define the string that exports the set of kernel-supported
- * Kerberos enctypes. This list is sent via upcall to gssd, and
- * is also exposed via the nfsd /proc API. The consumers generally
- * treat this as an ordered list, where the first item in the list
- * is the most preferred.
- */
-
-#ifndef _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
-#define _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
-
-#ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES
-
-/*
- * NB: This list includes DES3_CBC_SHA1, which was deprecated by RFC 8429.
- *
- * ENCTYPE_AES256_CTS_HMAC_SHA1_96
- * ENCTYPE_AES128_CTS_HMAC_SHA1_96
- * ENCTYPE_DES3_CBC_SHA1
- */
-#define KRB5_SUPPORTED_ENCTYPES "18,17,16"
-
-#else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
-
-/*
- * NB: This list includes encryption types that were deprecated
- * by RFC 8429 and RFC 6649.
- *
- * ENCTYPE_AES256_CTS_HMAC_SHA1_96
- * ENCTYPE_AES128_CTS_HMAC_SHA1_96
- * ENCTYPE_DES3_CBC_SHA1
- * ENCTYPE_DES_CBC_MD5
- * ENCTYPE_DES_CBC_CRC
- * ENCTYPE_DES_CBC_MD4
- */
-#define KRB5_SUPPORTED_ENCTYPES "18,17,16,3,1,2"
-
-#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
-
-#endif /* _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H */
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 02117ed0fa2e..c4b0eb2b2f04 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -34,6 +34,11 @@ enum rpc_auth_flavors {
RPC_AUTH_GSS_SPKMP = 390011,
};
+/* Maximum size (in octets) of the machinename in an AUTH_UNIX
+ * credential (per RFC 5531 Appendix A)
+ */
+#define RPC_MAX_MACHINENAME (255)
+
/* Maximum size (in bytes) of an rpc credential or verifier */
#define RPC_MAX_AUTH_SIZE (400)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index ed722dd33b46..877891536c2f 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -21,14 +21,6 @@
#include <linux/mm.h>
#include <linux/pagevec.h>
-/* statistics for svc_pool structures */
-struct svc_pool_stats {
- atomic_long_t packets;
- unsigned long sockets_queued;
- atomic_long_t threads_woken;
- atomic_long_t threads_timedout;
-};
-
/*
*
* RPC service thread pool.
@@ -45,7 +37,12 @@ struct svc_pool {
struct list_head sp_sockets; /* pending sockets */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
- struct svc_pool_stats sp_stats; /* statistics on pool operation */
+
+ /* statistics on pool operation */
+ struct percpu_counter sp_sockets_queued;
+ struct percpu_counter sp_threads_woken;
+ struct percpu_counter sp_threads_timedout;
+
#define SP_TASK_PENDING (0) /* still work to do even if no
* xprt is queued. */
#define SP_CONGESTED (1)
@@ -193,40 +190,6 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
+ 2 + 1)
-static inline u32 svc_getnl(struct kvec *iov)
-{
- __be32 val, *vp;
- vp = iov->iov_base;
- val = *vp++;
- iov->iov_base = (void*)vp;
- iov->iov_len -= sizeof(__be32);
- return ntohl(val);
-}
-
-static inline void svc_putnl(struct kvec *iov, u32 val)
-{
- __be32 *vp = iov->iov_base + iov->iov_len;
- *vp = htonl(val);
- iov->iov_len += sizeof(__be32);
-}
-
-static inline __be32 svc_getu32(struct kvec *iov)
-{
- __be32 val, *vp;
- vp = iov->iov_base;
- val = *vp++;
- iov->iov_base = (void*)vp;
- iov->iov_len -= sizeof(__be32);
- return val;
-}
-
-static inline void svc_putu32(struct kvec *iov, __be32 val)
-{
- __be32 *vp = iov->iov_base + iov->iov_len;
- *vp = val;
- iov->iov_len += sizeof(__be32);
-}
-
/*
* The context of a single thread, including the request currently being
* processed.
@@ -285,6 +248,7 @@ struct svc_rqst {
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
+ __be32 *rq_accept_statp;
void * rq_auth_data; /* flavor-specific data */
__be32 rq_auth_stat; /* authentication status */
int rq_auth_slack; /* extra space xdr code
@@ -345,29 +309,6 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_daddr;
}
-/*
- * Check buffer bounds after decoding arguments
- */
-static inline int
-xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
-{
- char *cp = (char *)p;
- struct kvec *vec = &rqstp->rq_arg.head[0];
- return cp >= (char*)vec->iov_base
- && cp <= (char*)vec->iov_base + vec->iov_len;
-}
-
-static inline int
-xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
-{
- struct kvec *vec = &rqstp->rq_res.head[0];
- char *cp = (char*)p;
-
- vec->iov_len = cp - (char*)vec->iov_base;
-
- return vec->iov_len <= PAGE_SIZE;
-}
-
static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
while (rqstp->rq_next_page != rqstp->rq_respages) {
@@ -394,7 +335,7 @@ struct svc_deferred_req {
struct svc_process_info {
union {
- int (*dispatch)(struct svc_rqst *, __be32 *);
+ int (*dispatch)(struct svc_rqst *rqstp);
struct {
unsigned int lovers;
unsigned int hivers;
@@ -433,7 +374,7 @@ struct svc_version {
u32 vs_vers; /* version number */
u32 vs_nproc; /* number of procedures */
const struct svc_procedure *vs_proc; /* per-procedure info */
- unsigned int *vs_count; /* call counts */
+ unsigned long __percpu *vs_count; /* call counts */
u32 vs_xdrsize; /* xdrsize needed for this version */
/* Don't register with rpcbind */
@@ -446,7 +387,7 @@ struct svc_version {
bool vs_need_cong_ctrl;
/* Dispatch function */
- int (*vs_dispatch)(struct svc_rqst *, __be32 *);
+ int (*vs_dispatch)(struct svc_rqst *rqstp);
};
/*
@@ -540,9 +481,6 @@ static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
* svcxdr_init_decode - Prepare an xdr_stream for Call decoding
* @rqstp: controlling server RPC transaction context
*
- * This function currently assumes the RPC header in rq_arg has
- * already been decoded. Upon return, xdr->p points to the
- * location of the upper layer header.
*/
static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
{
@@ -550,11 +488,7 @@ static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
struct xdr_buf *buf = &rqstp->rq_arg;
struct kvec *argv = buf->head;
- /*
- * svc_getnl() and friends do not keep the xdr_buf's ::len
- * field up to date. Refresh that field before initializing
- * the argument decoding stream.
- */
+ WARN_ON(buf->len != buf->head->iov_len + buf->page_len + buf->tail->iov_len);
buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len;
xdr_init_decode(xdr, buf, argv->iov_base, NULL);
@@ -577,12 +511,53 @@ static inline void svcxdr_init_encode(struct svc_rqst *rqstp)
xdr->buf = buf;
xdr->iov = resv;
xdr->p = resv->iov_base + resv->iov_len;
- xdr->end = resv->iov_base + PAGE_SIZE - rqstp->rq_auth_slack;
+ xdr->end = resv->iov_base + PAGE_SIZE;
buf->len = resv->iov_len;
xdr->page_ptr = buf->pages - 1;
buf->buflen = PAGE_SIZE * (rqstp->rq_page_end - buf->pages);
- buf->buflen -= rqstp->rq_auth_slack;
xdr->rqst = NULL;
}
+/**
+ * svcxdr_set_auth_slack - Reserve response buffer space for the auth flavor
+ * @rqstp: RPC transaction
+ * @slack: buffer space to reserve for the transaction's security flavor
+ *
+ * Set the request's slack space requirement, and set aside that much
+ * space in the rqstp's rq_res.head for use when the auth wraps the Reply.
+ */
+static inline void svcxdr_set_auth_slack(struct svc_rqst *rqstp, int slack)
+{
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct xdr_buf *buf = &rqstp->rq_res;
+ struct kvec *resv = buf->head;
+
+ rqstp->rq_auth_slack = slack;
+
+ xdr->end -= XDR_QUADLEN(slack);
+ buf->buflen -= rqstp->rq_auth_slack;
+
+ WARN_ON(xdr->iov != resv);
+ WARN_ON(xdr->p > xdr->end);
+}
+
+/**
+ * svcxdr_set_accept_stat - Reserve space for the accept_stat field
+ * @rqstp: RPC transaction context
+ *
+ * Return values:
+ * %true: Success
+ * %false: No response buffer space was available
+ */
+static inline bool svcxdr_set_accept_stat(struct svc_rqst *rqstp)
+{
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+
+ rqstp->rq_accept_statp = xdr_reserve_space(xdr, XDR_UNIT);
+ if (unlikely(!rqstp->rq_accept_statp))
+ return false;
+ *rqstp->rq_accept_statp = rpc_success;
+ return true;
+}
+
#endif /* SUNRPC_SVC_H */
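A hedged sketch of how a dispatch function might use svcxdr_set_accept_stat() above (illustrative; the real callers live in the SUNRPC server code, and my_dispatch()/my_process() are made-up names).

#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/svc.h>

/* Hypothetical program-specific handler; returns false for unknown procedures. */
static bool my_process(struct svc_rqst *rqstp)
{
	return true;	/* placeholder */
}

static int my_dispatch(struct svc_rqst *rqstp)
{
	/* Reserve the accept_stat field; it defaults to rpc_success. */
	if (!svcxdr_set_accept_stat(rqstp))
		return 0;		/* no buffer space: drop the reply */

	if (!my_process(rqstp))
		*rqstp->rq_accept_statp = rpc_proc_unavail;

	return 1;			/* send the reply */
}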
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index d42a75b3be10..775368802762 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -26,7 +26,6 @@ struct svc_xprt_ops {
void (*xpo_release_rqst)(struct svc_rqst *);
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
- void (*xpo_secure_port)(struct svc_rqst *rqstp);
void (*xpo_kill_temp_xprt)(struct svc_xprt *);
void (*xpo_start_tls)(struct svc_xprt *);
};
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index f84e2a1358e1..72014c9216fc 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -188,7 +188,6 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
/*
* XDR buffer helper functions
*/
-extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void xdr_buf_from_iov(const struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(const struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
@@ -247,6 +246,7 @@ extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec,
size_t nbytes);
extern void __xdr_commit_encode(struct xdr_stream *xdr);
extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
+extern void xdr_truncate_decode(struct xdr_stream *xdr, size_t len);
extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
@@ -346,6 +346,11 @@ ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str,
size_t size);
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
size_t maxlen, gfp_t gfp_flags);
+ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor,
+ void **body, unsigned int *body_len);
+ssize_t xdr_stream_encode_opaque_auth(struct xdr_stream *xdr, u32 flavor,
+ void *body, unsigned int body_len);
+
/**
* xdr_align_size - Calculate padded size of an object
* @n: Size of an object being XDR encoded (in bytes)
@@ -470,6 +475,27 @@ xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n)
}
/**
+ * xdr_stream_encode_be32 - Encode a big-endian 32-bit integer
+ * @xdr: pointer to xdr_stream
+ * @n: integer to encode
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_encode_be32(struct xdr_stream *xdr, __be32 n)
+{
+ const size_t len = sizeof(n);
+ __be32 *p = xdr_reserve_space(xdr, len);
+
+ if (unlikely(!p))
+ return -EMSGSIZE;
+ *p = n;
+ return len;
+}
+
+/**
* xdr_stream_encode_u64 - Encode a 64-bit integer
* @xdr: pointer to xdr_stream
* @n: 64-bit integer to encode
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
index 1545e5567b15..df81043b9e71 100644
--- a/include/linux/surface_aggregator/device.h
+++ b/include/linux/surface_aggregator/device.h
@@ -229,10 +229,7 @@ static inline bool is_ssam_device(struct device *d)
* Return: Returns a pointer to the &struct ssam_device wrapping the given
* device @d.
*/
-static inline struct ssam_device *to_ssam_device(struct device *d)
-{
- return container_of(d, struct ssam_device, dev);
-}
+#define to_ssam_device(d) container_of_const(d, struct ssam_device, dev)
/**
* to_ssam_device_driver() - Casts the given device driver to a SSAM client
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 0ceed49516ad..209a425739a9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -401,8 +401,8 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
-extern void deactivate_page(struct page *page);
-extern void mark_page_lazyfree(struct page *page);
+void folio_deactivate(struct folio *folio);
+void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);
extern void lru_cache_add_inactive_or_unevictable(struct page *page,
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index b982dd614572..3a451b7afcb3 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -337,7 +337,8 @@ extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
#ifdef CONFIG_HUGETLB_PAGE
-extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
+extern void __migration_entry_wait_huge(struct vm_area_struct *vma,
+ pte_t *ptep, spinlock_t *ptl);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
#endif /* CONFIG_HUGETLB_PAGE */
#else /* CONFIG_MIGRATION */
@@ -366,7 +367,8 @@ static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
#ifdef CONFIG_HUGETLB_PAGE
-static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
+static inline void __migration_entry_wait_huge(struct vm_area_struct *vma,
+ pte_t *ptep, spinlock_t *ptl) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
#endif /* CONFIG_HUGETLB_PAGE */
static inline int is_writable_migration_entry(swp_entry_t entry)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 35bc4e281c21..bcef10e20ea4 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -121,7 +121,6 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
-unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
@@ -140,10 +139,6 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
static inline void swiotlb_exit(void)
{
}
-static inline unsigned int swiotlb_max_segment(void)
-{
- return 0;
-}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
return SIZE_MAX;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 4564faafd0e1..fea32377f7c7 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -245,5 +245,38 @@ static inline const struct cpumask *cpu_cpu_mask(int cpu)
return cpumask_of_node(cpu_to_node(cpu));
}
+#ifdef CONFIG_NUMA
+int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
+extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
+#else
+static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+{
+ return cpumask_nth(cpu, cpus);
+}
+
+static inline const struct cpumask *
+sched_numa_hop_mask(unsigned int node, unsigned int hops)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* CONFIG_NUMA */
+
+/**
+ * for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
+ * from a given node.
+ * @mask: the iteration variable.
+ * @node: the NUMA node to start the search from.
+ *
+ * Requires rcu_read_lock() to be held.
+ *
+ * Yields cpu_online_mask for @node == NUMA_NO_NODE.
+ */
+#define for_each_numa_hop_mask(mask, node) \
+ for (unsigned int __hops = 0; \
+ mask = (node != NUMA_NO_NODE || __hops) ? \
+ sched_numa_hop_mask(node, __hops) : \
+ cpu_online_mask, \
+ !IS_ERR_OR_NULL(mask); \
+ __hops++)
#endif /* _LINUX_TOPOLOGY_H */
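A hedged usage sketch for the iterator above (not from this patch): pick an online CPU as close as possible to @node, holding rcu_read_lock() as required. pick_cpu_near() is a hypothetical helper.

#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/topology.h>

static int pick_cpu_near(int node)
{
	const struct cpumask *mask;
	unsigned int cpu = nr_cpu_ids;

	rcu_read_lock();
	for_each_numa_hop_mask(mask, node) {
		/* Masks are visited in order of increasing NUMA distance. */
		cpu = cpumask_first_and(mask, cpu_online_mask);
		if (cpu < nr_cpu_ids)
			break;
	}
	rcu_read_unlock();

	return cpu < nr_cpu_ids ? (int)cpu : -1;
}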
diff --git a/include/linux/trace.h b/include/linux/trace.h
index 80ffda871749..2a70a447184c 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -33,6 +33,18 @@ struct trace_array;
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
+/**
+ * trace_array_puts - write a constant string into the trace buffer.
+ * @tr: The trace array to write to
+ * @str: The constant string to write
+ */
+#define trace_array_puts(tr, str) \
+ ({ \
+ str ? __trace_array_puts(tr, _THIS_IP_, str, strlen(str)) : -1; \
+ })
+int __trace_array_puts(struct trace_array *tr, unsigned long ip,
+ const char *str, int size);
+
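A minimal sketch of using the macro above (illustrative): drop a constant marker into an instance ring buffer. my_tr is assumed to have been obtained elsewhere, e.g. via trace_array_get_by_name().

#include <linux/trace.h>

static void mark_phase_start(struct trace_array *my_tr)
{
	trace_array_puts(my_tr, "phase: start\n");
}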
void trace_printk_init_buffers(void);
__printf(3, 4)
int trace_array_printk(struct trace_array *tr, unsigned long ip,
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 0c4c7587d6c3..6be92bf559fe 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -95,6 +95,7 @@ extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
+char *trace_seq_acquire(struct trace_seq *s, unsigned int len);
#else /* CONFIG_TRACING */
static inline __printf(2, 3)
@@ -139,6 +140,10 @@ static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
{
return 0;
}
+static inline char *trace_seq_acquire(struct trace_seq *s, unsigned int len)
+{
+ return NULL;
+}
#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_SEQ_H */
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 552f80b8362f..e299f29375bb 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -482,7 +482,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* * This is how the trace record is structured and will
* * be saved into the ring buffer. These are the fields
* * that will be exposed to user-space in
- * * /sys/kernel/debug/tracing/events/<*>/format.
+ * * /sys/kernel/tracing/events/<*>/format.
* *
* * The declared 'local variable' is called '__entry'
* *
@@ -542,7 +542,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* tracepoint callback (this is used by programmatic plugins and
* can also by used by generic instrumentation like SystemTap), and
* it is also used to expose a structured trace record in
- * /sys/kernel/debug/tracing/events/.
+ * /sys/kernel/tracing/events/.
*
* A set of (un)registration functions can be passed to the variant
* TRACE_EVENT_FN to perform any (un)registration work.
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
index 63076fb835e3..2efc271a96fa 100644
--- a/include/linux/transport_class.h
+++ b/include/linux/transport_class.h
@@ -70,8 +70,14 @@ void transport_destroy_device(struct device *);
static inline int
transport_register_device(struct device *dev)
{
+ int ret;
+
transport_setup_device(dev);
- return transport_add_device(dev);
+ ret = transport_add_device(dev);
+ if (ret)
+ transport_destroy_device(dev);
+
+ return ret;
}
static inline void
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 730c3301d710..093935e97f42 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -453,7 +453,7 @@ unsigned char tty_get_char_size(unsigned int cflag);
unsigned char tty_get_frame_size(unsigned int cflag);
void tty_termios_copy_hw(struct ktermios *new, const struct ktermios *old);
-int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
+bool tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
void tty_wakeup(struct tty_struct *tty);
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index dcb61ec11424..49dc172dedc7 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -170,7 +170,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* send, please arise a tasklet or workqueue to do the real data transfer.
* Do not send data in this hook, it may lead to a deadlock.
*
- * @dcd_change: [DRV] ``void ()(struct tty_struct *tty, unsigned int status)``
+ * @dcd_change: [DRV] ``void ()(struct tty_struct *tty, bool active)``
*
* Tells the discipline that the DCD pin has changed its status. Used
* exclusively by the %N_PPS (Pulse-Per-Second) line discipline.
@@ -238,7 +238,7 @@ struct tty_ldisc_ops {
void (*receive_buf)(struct tty_struct *tty, const unsigned char *cp,
const char *fp, int count);
void (*write_wakeup)(struct tty_struct *tty);
- void (*dcd_change)(struct tty_struct *tty, unsigned int status);
+ void (*dcd_change)(struct tty_struct *tty, bool active);
int (*receive_buf2)(struct tty_struct *tty, const unsigned char *cp,
const char *fp, int count);
void (*lookahead_buf)(struct tty_struct *tty, const unsigned char *cp,
diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h
index fa3c3bdaa234..edf685a24f7c 100644
--- a/include/linux/tty_port.h
+++ b/include/linux/tty_port.h
@@ -15,8 +15,8 @@ struct tty_struct;
/**
* struct tty_port_operations -- operations on tty_port
- * @carrier_raised: return 1 if the carrier is raised on @port
- * @dtr_rts: raise the DTR line if @raise is nonzero, otherwise lower DTR
+ * @carrier_raised: return true if the carrier is raised on @port
+ * @dtr_rts: raise the DTR line if @active is true, otherwise lower DTR
* @shutdown: called when the last close completes or a hangup finishes IFF the
* port was initialized. Do not use to free resources. Turn off the device
* only. Called under the port mutex to serialize against @activate and
@@ -31,8 +31,8 @@ struct tty_struct;
* the port itself.
*/
struct tty_port_operations {
- int (*carrier_raised)(struct tty_port *port);
- void (*dtr_rts)(struct tty_port *port, int raise);
+ bool (*carrier_raised)(struct tty_port *port);
+ void (*dtr_rts)(struct tty_port *port, bool active);
void (*shutdown)(struct tty_port *port);
int (*activate)(struct tty_port *port, struct tty_struct *tty);
void (*destruct)(struct tty_port *port);
@@ -230,7 +230,7 @@ static inline void tty_port_set_kopened(struct tty_port *port, bool val)
struct tty_struct *tty_port_tty_get(struct tty_port *port);
void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
-int tty_port_carrier_raised(struct tty_port *port);
+bool tty_port_carrier_raised(struct tty_port *port);
void tty_port_raise_dtr_rts(struct tty_port *port);
void tty_port_lower_dtr_rts(struct tty_port *port);
void tty_port_hangup(struct tty_port *port);
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 46040d66334a..ffe48e69b3f3 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -213,16 +213,4 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
return __u64_stats_fetch_retry(syncp, start);
}
-/* Obsolete interfaces */
-static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
-{
- return u64_stats_fetch_begin(syncp);
-}
-
-static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
- unsigned int start)
-{
- return u64_stats_fetch_retry(syncp, start);
-}
-
#endif /* _LINUX_U64_STATS_SYNC_H */
diff --git a/include/linux/uacce.h b/include/linux/uacce.h
index 9ce88c28b0a8..0a81c3dfd26c 100644
--- a/include/linux/uacce.h
+++ b/include/linux/uacce.h
@@ -8,6 +8,7 @@
#define UACCE_NAME "uacce"
#define UACCE_MAX_REGION 2
#define UACCE_MAX_NAME_SIZE 64
+#define UACCE_MAX_ERR_THRESHOLD 65535
struct uacce_queue;
struct uacce_device;
@@ -30,6 +31,9 @@ struct uacce_qfile_region {
* @is_q_updated: check whether the task is finished
* @mmap: mmap addresses of queue to user space
* @ioctl: ioctl for user space users of the queue
+ * @get_isolate_state: get the device state after setting the isolation strategy
+ * @isolate_err_threshold_write: store the isolate error threshold in the device
+ * @isolate_err_threshold_read: read the isolate error threshold value from the device
*/
struct uacce_ops {
int (*get_available_instances)(struct uacce_device *uacce);
@@ -43,6 +47,9 @@ struct uacce_ops {
struct uacce_qfile_region *qfr);
long (*ioctl)(struct uacce_queue *q, unsigned int cmd,
unsigned long arg);
+ enum uacce_dev_state (*get_isolate_state)(struct uacce_device *uacce);
+ int (*isolate_err_threshold_write)(struct uacce_device *uacce, u32 num);
+ u32 (*isolate_err_threshold_read)(struct uacce_device *uacce);
};
/**
@@ -57,6 +64,11 @@ struct uacce_interface {
const struct uacce_ops *ops;
};
+enum uacce_dev_state {
+ UACCE_DEV_NORMAL,
+ UACCE_DEV_ISOLATE,
+};
+
enum uacce_q_state {
UACCE_Q_ZOMBIE = 0,
UACCE_Q_INIT,
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 73b1d5d1e4f1..27e3fd942960 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -13,6 +13,8 @@
struct page;
struct pipe_inode_info;
+typedef unsigned int __bitwise iov_iter_extraction_t;
+
struct kvec {
void *iov_base; /* and that should *never* hold a userland pointer */
size_t iov_len;
@@ -252,12 +254,12 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *
loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start,
- unsigned gup_flags);
+ iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize, size_t *start,
- unsigned gup_flags);
+ iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
@@ -360,5 +362,34 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
.count = count
};
}
+/* Flags for iov_iter_get/extract_pages*() */
+/* Allow P2PDMA on the extracted pages */
+#define ITER_ALLOW_P2PDMA ((__force iov_iter_extraction_t)0x01)
+
+ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
+ size_t maxsize, unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0);
+
+/**
+ * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
+ * @iter: The iterator
+ *
+ * Examine the iterator and indicate by returning true or false as to how, if
+ * at all, pages extracted from the iterator will be retained by the extraction
+ * function.
+ *
+ * %true indicates that the pages will have a pin placed in them that the
+ * caller must unpin. This must be done for DMA/async DIO to force fork()
+ * to copy the page for the child (the parent must retain the original
+ * page).
+ *
+ * %false indicates that no measures are taken and that it's up to the caller
+ * to retain the pages.
+ */
+static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
+{
+ return user_backed_iter(iter);
+}
#endif
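A hedged sketch of the extraction flow documented above (not part of this patch; my_map_for_dma()/my_dma_done() are made-up names and error handling is trimmed). The caller supplies the page array, so no assumption is made about the helper allocating one.

#include <linux/mm.h>
#include <linux/uio.h>

#define MY_MAX_PAGES 8	/* arbitrary bound for this sketch */

/* Extract up to MY_MAX_PAGES pages backing @iter for a DMA transfer. */
static ssize_t my_map_for_dma(struct iov_iter *iter,
			      struct page *pages[MY_MAX_PAGES],
			      size_t *offset0)
{
	struct page **p = pages;	/* caller-provided page array */

	return iov_iter_extract_pages(iter, &p, MY_MAX_PAGES * PAGE_SIZE,
				      MY_MAX_PAGES, 0, offset0);
}

/* After the DMA completes, drop the pins if the extraction took any. */
static void my_dma_done(struct iov_iter *iter, struct page **pages,
			unsigned int npages)
{
	if (iov_iter_extract_will_pin(iter))
		unpin_user_pages(pages, npages);
}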
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 86d1c8e79566..9642ee02d713 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1628,14 +1628,25 @@ struct urb {
* @urb: pointer to the urb to initialize.
* @dev: pointer to the struct usb_device for this urb.
* @pipe: the endpoint pipe
- * @setup_packet: pointer to the setup_packet buffer
- * @transfer_buffer: pointer to the transfer buffer
+ * @setup_packet: pointer to the setup_packet buffer. The buffer must be
+ * suitable for DMA.
+ * @transfer_buffer: pointer to the transfer buffer. The buffer must be
+ * suitable for DMA.
* @buffer_length: length of the transfer buffer
* @complete_fn: pointer to the usb_complete_t function
* @context: what to set the urb context to.
*
* Initializes a control urb with the proper information needed to submit
* it to a device.
+ *
+ * The transfer buffer and the setup_packet buffer will most likely be filled
+ * or read via DMA. The simplest way to get a buffer that can be DMAed to is
+ * allocating it via kmalloc() or equivalent, even for very small buffers.
+ * If the buffers are embedded in a bigger structure, there is a risk that
+ * the buffer itself, the previous fields and/or the next fields are corrupted
+ * due to cache incoherencies, or slowed down if they are evicted from the
+ * cache. For more information, check &struct urb.
+ *
*/
static inline void usb_fill_control_urb(struct urb *urb,
struct usb_device *dev,
@@ -1660,13 +1671,17 @@ static inline void usb_fill_control_urb(struct urb *urb,
* @urb: pointer to the urb to initialize.
* @dev: pointer to the struct usb_device for this urb.
* @pipe: the endpoint pipe
- * @transfer_buffer: pointer to the transfer buffer
+ * @transfer_buffer: pointer to the transfer buffer. The buffer must be
+ * suitable for DMA.
* @buffer_length: length of the transfer buffer
* @complete_fn: pointer to the usb_complete_t function
* @context: what to set the urb context to.
*
* Initializes a bulk urb with the proper information needed to submit it
* to a device.
+ *
+ * Refer to usb_fill_control_urb() for a description of the requirements for
+ * transfer_buffer.
*/
static inline void usb_fill_bulk_urb(struct urb *urb,
struct usb_device *dev,
@@ -1689,7 +1704,8 @@ static inline void usb_fill_bulk_urb(struct urb *urb,
* @urb: pointer to the urb to initialize.
* @dev: pointer to the struct usb_device for this urb.
* @pipe: the endpoint pipe
- * @transfer_buffer: pointer to the transfer buffer
+ * @transfer_buffer: pointer to the transfer buffer. The buffer must be
+ * suitable for DMA.
* @buffer_length: length of the transfer buffer
* @complete_fn: pointer to the usb_complete_t function
* @context: what to set the urb context to.
@@ -1699,6 +1715,9 @@ static inline void usb_fill_bulk_urb(struct urb *urb,
* Initializes an interrupt urb with the proper information needed to submit
* it to a device.
*
+ * Refer to usb_fill_control_urb() for a description of the requirements for
+ * transfer_buffer.
+ *
* Note that High Speed and SuperSpeed(+) interrupt endpoints use a logarithmic
* encoding of the endpoint interval, and express polling intervals in
* microframes (eight per millisecond) rather than in frames (one per
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 43ac3fa760db..608dc962748b 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -25,6 +25,7 @@
#include <linux/version.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/webusb.h>
#include <linux/log2.h>
#include <linux/configfs.h>
@@ -431,6 +432,10 @@ static inline struct usb_composite_driver *to_cdriver(
* @qw_sign: qwSignature part of the OS string
* @b_vendor_code: bMS_VendorCode part of the OS string
* @use_os_string: false by default, interested gadgets set it
+ * @bcd_webusb_version: 0x0100 by default, WebUSB specification version
+ * @b_webusb_vendor_code: 0x0 by default, vendor code for WebUSB
+ * @landing_page: empty by default, landing page to announce in WebUSB
+ * @use_webusb: false by default, interested gadgets set it
* @os_desc_config: the configuration to be used with OS descriptors
* @setup_pending: true when setup request is queued but not completed
* @os_desc_pending: true when os_desc request is queued but not completed
@@ -474,6 +479,12 @@ struct usb_composite_dev {
struct usb_configuration *os_desc_config;
unsigned int use_os_string:1;
+ /* WebUSB */
+ u16 bcd_webusb_version;
+ u8 b_webusb_vendor_code;
+ char landing_page[WEBUSB_URL_RAW_MAX_LENGTH];
+ unsigned int use_webusb:1;
+
/* private: */
/* internals */
unsigned int suspended:1;
@@ -483,6 +494,7 @@ struct usb_composite_dev {
struct usb_composite_driver *driver;
u8 next_string_id;
char *def_manufacturer;
+ struct usb_string *usb_strings;
/* the gadget driver won't enable the data pullup
* while the deactivation count is nonzero.
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index dc3092cea99e..00750f7020f3 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -15,6 +15,7 @@
#ifndef __LINUX_USB_GADGET_H
#define __LINUX_USB_GADGET_H
+#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -821,6 +822,16 @@ int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *bu
/* check if the given language identifier is valid */
bool usb_validate_langid(u16 langid);
+struct gadget_string {
+ struct config_item item;
+ struct list_head list;
+ char string[USB_MAX_STRING_LEN];
+ struct usb_string usb_string;
+};
+
+#define to_gadget_string(str_item) \
+	container_of(str_item, struct gadget_string, item)
+
/*-------------------------------------------------------------------------*/
/* utility to simplify managing config descriptors */
diff --git a/include/linux/usb/rzv2m_usb3drd.h b/include/linux/usb/rzv2m_usb3drd.h
new file mode 100644
index 000000000000..4cc848de229f
--- /dev/null
+++ b/include/linux/usb/rzv2m_usb3drd.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __RZV2M_USB3DRD_H
+#define __RZV2M_USB3DRD_H
+
+#include <linux/types.h>
+
+struct rzv2m_usb3drd {
+ void __iomem *reg;
+ int drd_irq;
+ struct device *dev;
+ struct reset_control *drd_rstc;
+};
+
+#if IS_ENABLED(CONFIG_USB_RZV2M_USB3DRD)
+void rzv2m_usb3drd_reset(struct device *dev, bool host);
+#else
+static inline void rzv2m_usb3drd_reset(struct device *dev, bool host) { }
+#endif
+
+#endif /* __RZV2M_USB3DRD_H */
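A hedged sketch (not from this patch): a child controller driver asks the DRD block to switch the shared logic into host mode. Treating the DRD glue as the parent platform device is an assumption of this example.

#include <linux/platform_device.h>
#include <linux/usb/rzv2m_usb3drd.h>

static void my_switch_to_host(struct platform_device *pdev)
{
	/* The DRD glue is assumed to be the parent platform device. */
	rzv2m_usb3drd_reset(pdev->dev.parent, true);
}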
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
index 17657451c762..85e95a3251d3 100644
--- a/include/linux/usb/tcpci.h
+++ b/include/linux/usb/tcpci.h
@@ -188,6 +188,12 @@ struct tcpci;
* Optional; The USB Communications Capable bit indicates if port
* partner is capable of communication over the USB data lines
* (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit.
+ * @check_contaminant:
+ * Optional; The callback is invoked when chip-level drivers indicate
+ * that the USB port needs to be checked for contaminant presence.
+ * Chip-level drivers are expected to check for contaminant and call
+ * tcpm_port_clean() when the port is clean, to put the port back into
+ * the toggling state.
*/
struct tcpci_data {
struct regmap *regmap;
@@ -204,6 +210,7 @@ struct tcpci_data {
void (*frs_sourcing_vbus)(struct tcpci *tcpci, struct tcpci_data *data);
void (*set_partner_usb_comm_capable)(struct tcpci *tcpci, struct tcpci_data *data,
bool capable);
+ void (*check_contaminant)(struct tcpci *tcpci, struct tcpci_data *data);
};
struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data);
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index bffc8d3e14ad..ab7ca872950b 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -114,6 +114,11 @@ enum tcpm_transmit_type {
* Optional; The USB Communications Capable bit indicates if port
* partner is capable of communication over the USB data lines
* (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit.
+ * @check_contaminant:
+ * Optional; The callback is called when CC pins report open status
+ * at the end of the debounce period or when the port is still
+ * toggling. Chip-level drivers are expected to check for contaminant
+ * and call tcpm_port_clean() when the port is clean.
*/
struct tcpc_dev {
struct fwnode_handle *fwnode;
@@ -148,6 +153,7 @@ struct tcpc_dev {
bool pps_active, u32 requested_vbus_voltage);
bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable);
+ void (*check_contaminant)(struct tcpc_dev *dev);
};
struct tcpm_port;
@@ -165,5 +171,7 @@ void tcpm_pd_transmit_complete(struct tcpm_port *port,
enum tcpm_transmit_status status);
void tcpm_pd_hard_reset(struct tcpm_port *port);
void tcpm_tcpc_reset(struct tcpm_port *port);
+void tcpm_port_clean(struct tcpm_port *port);
+bool tcpm_port_is_toggling(struct tcpm_port *port);
#endif /* __LINUX_USB_TCPM_H */
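A hedged sketch of a chip-level @check_contaminant implementation (illustrative; struct my_tcpc and my_port_is_dirty() are hypothetical): probe the detection hardware and hand the port back to TCPM once it is clean.

#include <linux/container_of.h>
#include <linux/usb/tcpm.h>

struct my_tcpc {
	struct tcpc_dev tcpc;
	struct tcpm_port *port;
};

/* Hypothetical: ask the detection hardware whether moisture is present. */
static bool my_port_is_dirty(struct my_tcpc *chip)
{
	return false;	/* placeholder for a real measurement */
}

static void my_check_contaminant(struct tcpc_dev *dev)
{
	struct my_tcpc *chip = container_of(dev, struct my_tcpc, tcpc);

	/* Re-enable toggling only once the port measures dry. */
	if (!my_port_is_dirty(chip))
		tcpm_port_clean(chip->port);
}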
diff --git a/include/media/v4l2-uvc.h b/include/linux/usb/uvc.h
index f83e31661333..88d96095bcb1 100644
--- a/include/media/v4l2-uvc.h
+++ b/include/linux/usb/uvc.h
@@ -99,6 +99,9 @@
#define UVC_GUID_FORMAT_BGR3 \
{ 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
+#define UVC_GUID_FORMAT_BGR4 \
+ { 0x7e, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
+ 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
#define UVC_GUID_FORMAT_M420 \
{ 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
@@ -145,215 +148,11 @@
{ 'H', 'E', 'V', 'C', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-/* ------------------------------------------------------------------------
- * Video formats
- */
-
struct uvc_format_desc {
- char *name;
u8 guid[16];
u32 fcc;
};
-static struct uvc_format_desc uvc_fmts[] = {
- {
- .name = "YUV 4:2:2 (YUYV)",
- .guid = UVC_GUID_FORMAT_YUY2,
- .fcc = V4L2_PIX_FMT_YUYV,
- },
- {
- .name = "YUV 4:2:2 (YUYV)",
- .guid = UVC_GUID_FORMAT_YUY2_ISIGHT,
- .fcc = V4L2_PIX_FMT_YUYV,
- },
- {
- .name = "YUV 4:2:0 (NV12)",
- .guid = UVC_GUID_FORMAT_NV12,
- .fcc = V4L2_PIX_FMT_NV12,
- },
- {
- .name = "MJPEG",
- .guid = UVC_GUID_FORMAT_MJPEG,
- .fcc = V4L2_PIX_FMT_MJPEG,
- },
- {
- .name = "YVU 4:2:0 (YV12)",
- .guid = UVC_GUID_FORMAT_YV12,
- .fcc = V4L2_PIX_FMT_YVU420,
- },
- {
- .name = "YUV 4:2:0 (I420)",
- .guid = UVC_GUID_FORMAT_I420,
- .fcc = V4L2_PIX_FMT_YUV420,
- },
- {
- .name = "YUV 4:2:0 (M420)",
- .guid = UVC_GUID_FORMAT_M420,
- .fcc = V4L2_PIX_FMT_M420,
- },
- {
- .name = "YUV 4:2:2 (UYVY)",
- .guid = UVC_GUID_FORMAT_UYVY,
- .fcc = V4L2_PIX_FMT_UYVY,
- },
- {
- .name = "Greyscale 8-bit (Y800)",
- .guid = UVC_GUID_FORMAT_Y800,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "Greyscale 8-bit (Y8 )",
- .guid = UVC_GUID_FORMAT_Y8,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "Greyscale 8-bit (D3DFMT_L8)",
- .guid = UVC_GUID_FORMAT_D3DFMT_L8,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "IR 8-bit (L8_IR)",
- .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "Greyscale 10-bit (Y10 )",
- .guid = UVC_GUID_FORMAT_Y10,
- .fcc = V4L2_PIX_FMT_Y10,
- },
- {
- .name = "Greyscale 12-bit (Y12 )",
- .guid = UVC_GUID_FORMAT_Y12,
- .fcc = V4L2_PIX_FMT_Y12,
- },
- {
- .name = "Greyscale 16-bit (Y16 )",
- .guid = UVC_GUID_FORMAT_Y16,
- .fcc = V4L2_PIX_FMT_Y16,
- },
- {
- .name = "BGGR Bayer (BY8 )",
- .guid = UVC_GUID_FORMAT_BY8,
- .fcc = V4L2_PIX_FMT_SBGGR8,
- },
- {
- .name = "BGGR Bayer (BA81)",
- .guid = UVC_GUID_FORMAT_BA81,
- .fcc = V4L2_PIX_FMT_SBGGR8,
- },
- {
- .name = "GBRG Bayer (GBRG)",
- .guid = UVC_GUID_FORMAT_GBRG,
- .fcc = V4L2_PIX_FMT_SGBRG8,
- },
- {
- .name = "GRBG Bayer (GRBG)",
- .guid = UVC_GUID_FORMAT_GRBG,
- .fcc = V4L2_PIX_FMT_SGRBG8,
- },
- {
- .name = "RGGB Bayer (RGGB)",
- .guid = UVC_GUID_FORMAT_RGGB,
- .fcc = V4L2_PIX_FMT_SRGGB8,
- },
- {
- .name = "RGB565",
- .guid = UVC_GUID_FORMAT_RGBP,
- .fcc = V4L2_PIX_FMT_RGB565,
- },
- {
- .name = "BGR 8:8:8 (BGR3)",
- .guid = UVC_GUID_FORMAT_BGR3,
- .fcc = V4L2_PIX_FMT_BGR24,
- },
- {
- .name = "H.264",
- .guid = UVC_GUID_FORMAT_H264,
- .fcc = V4L2_PIX_FMT_H264,
- },
- {
- .name = "H.265",
- .guid = UVC_GUID_FORMAT_H265,
- .fcc = V4L2_PIX_FMT_HEVC,
- },
- {
- .name = "Greyscale 8 L/R (Y8I)",
- .guid = UVC_GUID_FORMAT_Y8I,
- .fcc = V4L2_PIX_FMT_Y8I,
- },
- {
- .name = "Greyscale 12 L/R (Y12I)",
- .guid = UVC_GUID_FORMAT_Y12I,
- .fcc = V4L2_PIX_FMT_Y12I,
- },
- {
- .name = "Depth data 16-bit (Z16)",
- .guid = UVC_GUID_FORMAT_Z16,
- .fcc = V4L2_PIX_FMT_Z16,
- },
- {
- .name = "Bayer 10-bit (SRGGB10P)",
- .guid = UVC_GUID_FORMAT_RW10,
- .fcc = V4L2_PIX_FMT_SRGGB10P,
- },
- {
- .name = "Bayer 16-bit (SBGGR16)",
- .guid = UVC_GUID_FORMAT_BG16,
- .fcc = V4L2_PIX_FMT_SBGGR16,
- },
- {
- .name = "Bayer 16-bit (SGBRG16)",
- .guid = UVC_GUID_FORMAT_GB16,
- .fcc = V4L2_PIX_FMT_SGBRG16,
- },
- {
- .name = "Bayer 16-bit (SRGGB16)",
- .guid = UVC_GUID_FORMAT_RG16,
- .fcc = V4L2_PIX_FMT_SRGGB16,
- },
- {
- .name = "Bayer 16-bit (SGRBG16)",
- .guid = UVC_GUID_FORMAT_GR16,
- .fcc = V4L2_PIX_FMT_SGRBG16,
- },
- {
- .name = "Depth data 16-bit (Z16)",
- .guid = UVC_GUID_FORMAT_INVZ,
- .fcc = V4L2_PIX_FMT_Z16,
- },
- {
- .name = "Greyscale 10-bit (Y10 )",
- .guid = UVC_GUID_FORMAT_INVI,
- .fcc = V4L2_PIX_FMT_Y10,
- },
- {
- .name = "IR:Depth 26-bit (INZI)",
- .guid = UVC_GUID_FORMAT_INZI,
- .fcc = V4L2_PIX_FMT_INZI,
- },
- {
- .name = "4-bit Depth Confidence (Packed)",
- .guid = UVC_GUID_FORMAT_CNF4,
- .fcc = V4L2_PIX_FMT_CNF4,
- },
- {
- .name = "HEVC",
- .guid = UVC_GUID_FORMAT_HEVC,
- .fcc = V4L2_PIX_FMT_HEVC,
- },
-};
-
-static inline struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16])
-{
- unsigned int len = ARRAY_SIZE(uvc_fmts);
- unsigned int i;
-
- for (i = 0; i < len; ++i) {
- if (memcmp(guid, uvc_fmts[i].guid, 16) == 0)
- return &uvc_fmts[i];
- }
-
- return NULL;
-}
+const struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16]);
#endif /* __LINUX_V4L2_UVC_H */
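
With the format table no longer defined in the header, lookups go through the uvc_format_by_guid() accessor declared above. A minimal, hypothetical usage sketch (the example_* wrapper is not from the patch):

#include <linux/errno.h>
#include <linux/usb/uvc.h>

/* Map a UVC stream GUID to its V4L2 fourcc, or fail for unknown GUIDs. */
static int example_guid_to_fourcc(const u8 guid[16], u32 *fourcc)
{
	const struct uvc_format_desc *desc = uvc_format_by_guid(guid);

	if (!desc)
		return -EINVAL;

	*fourcc = desc->fcc;
	return 0;
}
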
diff --git a/include/linux/usb/webusb.h b/include/linux/usb/webusb.h
new file mode 100644
index 000000000000..fe43020b4a48
--- /dev/null
+++ b/include/linux/usb/webusb.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * WebUSB descriptors and constants
+ *
+ * Copyright (C) 2023 Jó Ágila Bitsch <jgilab@gmail.com>
+ */
+
+#ifndef __LINUX_USB_WEBUSB_H
+#define __LINUX_USB_WEBUSB_H
+
+#include "uapi/linux/usb/ch9.h"
+
+/*
+ * Little-endian Platform Capability UUID for WebUSB
+ * 3408b638-09a9-47a0-8bfd-a0768815b665
+ * to identify Platform Device Capability descriptors as referring to WebUSB.
+ */
+#define WEBUSB_UUID \
+ GUID_INIT(0x3408b638, 0x09a9, 0x47a0, 0x8b, 0xfd, 0xa0, 0x76, 0x88, 0x15, 0xb6, 0x65)
+
+/*
+ * WebUSB Platform Capability data
+ *
+ * A device announces support for the WebUSB command set by including the
+ * following Platform Descriptor Data in its Binary Object Store, associated
+ * with the WEBUSB_UUID above.
+ * See: https://wicg.github.io/webusb/#webusb-platform-capability-descriptor
+ */
+struct usb_webusb_cap_data {
+ __le16 bcdVersion;
+#define WEBUSB_VERSION_1_00 cpu_to_le16(0x0100) /* currently only version 1.00 is defined */
+ u8 bVendorCode;
+ u8 iLandingPage;
+#define WEBUSB_LANDING_PAGE_NOT_PRESENT 0
+#define WEBUSB_LANDING_PAGE_PRESENT 1 /* we chose the fixed index 1 for the URL descriptor */
+} __packed;
+
+#define USB_WEBUSB_CAP_DATA_SIZE 4
+
+/*
+ * Get URL Request
+ *
+ * The request to fetch a URL is defined in https://wicg.github.io/webusb/#get-url as:
+ * bmRequestType: (USB_DIR_IN | USB_TYPE_VENDOR) = 11000000B
+ * bRequest: bVendorCode
+ * wValue: iLandingPage
+ * wIndex: GET_URL = 2
+ * wLength: Descriptor Length (typically U8_MAX = 255)
+ * Data: URL Descriptor
+ */
+#define WEBUSB_GET_URL 2
+
+/*
+ * This descriptor contains a single URL and is returned by the Get URL request.
+ *
+ * See: https://wicg.github.io/webusb/#url-descriptor
+ */
+struct webusb_url_descriptor {
+ u8 bLength;
+#define WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH 3
+ u8 bDescriptorType;
+#define WEBUSB_URL_DESCRIPTOR_TYPE 3
+ u8 bScheme;
+#define WEBUSB_URL_SCHEME_HTTP 0
+#define WEBUSB_URL_SCHEME_HTTPS 1
+#define WEBUSB_URL_SCHEME_NONE 255
+ u8 URL[U8_MAX - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH];
+} __packed;
+
+/*
+ * Buffer size to hold the longest URL that can be stored in a URL descriptor
+ *
+ * The descriptor can be U8_MAX bytes long.
+ * WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH bytes are used for a header.
+ * Since the longest prefix that might be stripped is "https://", we can
+ * accommodate an additional 8 bytes.
+ */
+#define WEBUSB_URL_RAW_MAX_LENGTH (U8_MAX - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + 8)
+
+#endif /* __LINUX_USB_WEBUSB_H */
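
To illustrate the structures above, here is a hedged sketch of how a gadget might fill in the BOS Platform Capability data and a landing-page URL descriptor; the example_* names and the vendor code value are assumptions, not defined by this header.

#include <linux/string.h>
#include <linux/usb/webusb.h>

#define EXAMPLE_WEBUSB_VENDOR_CODE	0x22	/* device-chosen bVendorCode */

static const struct usb_webusb_cap_data example_webusb_cap = {
	.bcdVersion	= WEBUSB_VERSION_1_00,
	.bVendorCode	= EXAMPLE_WEBUSB_VENDOR_CODE,
	.iLandingPage	= WEBUSB_LANDING_PAGE_PRESENT,
};

static void example_fill_landing_page(struct webusb_url_descriptor *url)
{
	/* URL stored without its "https://" prefix, per bScheme. */
	static const char landing[] = "example.com/start";

	url->bLength = WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + sizeof(landing) - 1;
	url->bDescriptorType = WEBUSB_URL_DESCRIPTOR_TYPE;
	url->bScheme = WEBUSB_URL_SCHEME_HTTPS;
	memcpy(url->URL, landing, sizeof(landing) - 1);
}
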
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 9df0b9a762cc..3767f18114ef 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -73,7 +73,7 @@ extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
extern int mwriteprotect_range(struct mm_struct *dst_mm,
unsigned long start, unsigned long len,
bool enable_wp, atomic_t *mmap_changing);
-extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
+extern long uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
unsigned long start, unsigned long len, bool enable_wp);
/* mm helpers */
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
index 43db6e47503c..6bb460c3e818 100644
--- a/include/linux/util_macros.h
+++ b/include/linux/util_macros.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_HELPER_MACROS_H_
#define _LINUX_HELPER_MACROS_H_
+#include <linux/math.h>
+
#define __find_closest(x, a, as, op) \
({ \
typeof(as) __fc_i, __fc_as = (as) - 1; \
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 8cdc0d3567cd..6b1a3efa1e0b 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -8,15 +8,25 @@
#ifndef _LINUX_UUID_H_
#define _LINUX_UUID_H_
-#include <uapi/linux/uuid.h>
#include <linux/string.h>
#define UUID_SIZE 16
typedef struct {
__u8 b[UUID_SIZE];
+} guid_t;
+
+typedef struct {
+ __u8 b[UUID_SIZE];
} uuid_t;
+#define GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+((guid_t) \
+{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+
#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
((uuid_t) \
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
@@ -97,10 +107,7 @@ extern const u8 uuid_index[16];
int guid_parse(const char *uuid, guid_t *u);
int uuid_parse(const char *uuid, uuid_t *u);
-/* backwards compatibility, don't use in new code */
-static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
-{
- return memcmp(&u1, &u2, sizeof(guid_t));
-}
+/* MEI UUID type, don't use anywhere else */
+#include <uapi/linux/uuid.h>
#endif
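
The two initializers differ only in how the first three fields are laid out in memory; a short illustration using the WebUSB UUID from the webusb.h hunk above (variable names are illustrative):

#include <linux/uuid.h>

/* guid_t: first three fields stored little-endian. */
static const guid_t example_guid =
	GUID_INIT(0x3408b638, 0x09a9, 0x47a0,
		  0x8b, 0xfd, 0xa0, 0x76, 0x88, 0x15, 0xb6, 0x65);
/* bytes: 38 b6 08 34  a9 09  a0 47  8b fd a0 76 88 15 b6 65 */

/* uuid_t: the same fields stored big-endian. */
static const uuid_t example_uuid =
	UUID_INIT(0x3408b638, 0x09a9, 0x47a0,
		  0x8b, 0xfd, 0xa0, 0x76, 0x88, 0x15, 0xb6, 0x65);
/* bytes: 34 08 b6 38  09 a9  47 a0  8b fd a0 76 88 15 b6 65 */
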
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 6d0f5e4e82c2..43f59ef10cc9 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -219,7 +219,10 @@ struct vdpa_map_file {
* @reset: Reset device
* @vdev: vdpa device
* Returns integer: success (0) or error (< 0)
- * @suspend: Suspend or resume the device (optional)
+ * @suspend: Suspend the device (optional)
+ * @vdev: vdpa device
+ * Returns integer: success (0) or error (< 0)
+ * @resume: Resume the device (optional)
* @vdev: vdpa device
* Returns integer: success (0) or error (< 0)
* @get_config_size: Get the size of the configuration space includes
@@ -282,6 +285,11 @@ struct vdpa_map_file {
* @iova: iova to be unmapped
* @size: size of the area
* Returns integer: success (0) or error (< 0)
+ * @get_vq_dma_dev: Get the dma device for a specific
+ * virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns a pointer to the struct device on success or NULL on error
* @free: Free resources that belongs to vDPA (optional)
* @vdev: vdpa device
*/
@@ -324,6 +332,7 @@ struct vdpa_config_ops {
void (*set_status)(struct vdpa_device *vdev, u8 status);
int (*reset)(struct vdpa_device *vdev);
int (*suspend)(struct vdpa_device *vdev);
+ int (*resume)(struct vdpa_device *vdev);
size_t (*get_config_size)(struct vdpa_device *vdev);
void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len);
@@ -341,6 +350,7 @@ struct vdpa_config_ops {
u64 iova, u64 size);
int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
unsigned int asid);
+ struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
/* Free device resources */
void (*free)(struct vdpa_device *vdev);
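
A hedged sketch of a parent driver providing the two new optional ops; struct my_vdpa and its fields are assumptions, and the mandatory ops are elided:

#include <linux/container_of.h>
#include <linux/vdpa.h>

struct my_vdpa {
	struct vdpa_device vdpa;
	struct device *vq_dma_dev;	/* e.g. the parent PCI function */
};

static int my_vdpa_resume(struct vdpa_device *vdev)
{
	/* Undo what .suspend did and restart virtqueue processing. */
	return 0;
}

static struct device *my_vdpa_get_vq_dma_dev(struct vdpa_device *vdev, u16 idx)
{
	struct my_vdpa *mv = container_of(vdev, struct my_vdpa, vdpa);

	/* Here every virtqueue shares one DMA device; it may differ per @idx. */
	return mv->vq_dma_dev;
}

static const struct vdpa_config_ops my_vdpa_ops = {
	/* ... mandatory ops elided ... */
	.resume		= my_vdpa_resume,
	.get_vq_dma_dev	= my_vdpa_get_vq_dma_dev,
};
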
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 35be78e9ae57..93134b023968 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -46,7 +46,6 @@ struct vfio_device {
struct vfio_device_set *dev_set;
struct list_head dev_set_list;
unsigned int migration_flags;
- /* Driver must reference the kvm during open_device or never touch it */
struct kvm *kvm;
/* Members below here are private, not for driver use */
@@ -58,6 +57,7 @@ struct vfio_device {
struct list_head group_next;
struct list_head iommu_entry;
struct iommufd_access *iommufd_access;
+ void (*put_kvm)(struct kvm *kvm);
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_device *iommufd_device;
struct iommufd_ctx *iommufd_ictx;
@@ -70,6 +70,10 @@ struct vfio_device {
*
* @init: initialize private fields in device structure
* @release: Reclaim private fields in device structure
+ * @bind_iommufd: Called when binding the device to an iommufd
+ * @unbind_iommufd: Opposite of bind_iommufd
+ * @attach_ioas: Called when attaching the device to an IOAS/HWPT managed by the
+ * bound iommufd. Undone in @unbind_iommufd.
* @open_device: Called when the first file descriptor is opened for this device
* @close_device: Opposite of open_device
* @read: Perform read(2) on device file descriptor
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index dcab9c7e8784..2b472514c49b 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -127,10 +127,7 @@ struct virtio_device {
void *priv;
};
-static inline struct virtio_device *dev_to_virtio(struct device *_dev)
-{
- return container_of(_dev, struct virtio_device, dev);
-}
+#define dev_to_virtio(_dev) container_of_const(_dev, struct virtio_device, dev)
void virtio_add_status(struct virtio_device *dev, unsigned int status);
int register_virtio_device(struct virtio_device *dev);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 4b517649cfe8..2b3438de2c4d 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -16,8 +16,10 @@ struct virtio_shm_region {
u64 len;
};
+typedef void vq_callback_t(struct virtqueue *);
+
/**
- * virtio_config_ops - operations for configuring a virtio device
+ * struct virtio_config_ops - operations for configuring a virtio device
* Note: Do not assume that a transport implements all of the operations
* getting/setting a value as a simple read/write! Generally speaking,
* any of @get/@set, @get_status/@set_status, or @get_features/
@@ -69,7 +71,8 @@ struct virtio_shm_region {
* vdev: the virtio_device
* This sends the driver feature bits to the device: it can change
* the dev->feature bits if it wants.
- * Note: despite the name this can be called any number of times.
+ * Note that despite the name this can be called any number of
+ * times.
* Returns 0 on success or error status
* @bus_name: return the bus name associated with the device (optional)
* vdev: the virtio_device
@@ -91,7 +94,6 @@ struct virtio_shm_region {
* If disable_vq_and_reset is set, then enable_vq_after_reset must also be
* set.
*/
-typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len);
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 8b8af1a38991..8b95b69ef694 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -77,6 +77,22 @@ struct virtqueue *vring_create_virtqueue(unsigned int index,
const char *name);
/*
+ * Creates a virtqueue and allocates the descriptor ring with a
+ * per-virtqueue DMA device.
+ */
+struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool ctx,
+ bool (*notify)(struct virtqueue *vq),
+ void (*callback)(struct virtqueue *vq),
+ const char *name,
+ struct device *dma_dev);
+
+/*
* Creates a virtqueue with a standard layout but a caller-allocated
* ring.
*/
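
A hedged call sketch of the new allocator; the notify/callback pointers, alignment and flags below are placeholders a transport would choose for itself:

#include <linux/cache.h>
#include <linux/virtio_ring.h>

static struct virtqueue *example_create_vq(struct virtio_device *vdev,
					   unsigned int index, unsigned int num,
					   bool (*notify)(struct virtqueue *),
					   void (*callback)(struct virtqueue *),
					   struct device *dma_dev)
{
	/*
	 * The descriptor ring is allocated and mapped against @dma_dev
	 * instead of the transport's parent device.
	 */
	return vring_create_virtqueue_dma(index, num, SMP_CACHE_BYTES, vdev,
					  true,	 /* weak_barriers */
					  true,	 /* may_reduce_num */
					  false, /* ctx */
					  notify, callback, "example-vq",
					  dma_dev);
}
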
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 35d7eedb5e8e..3f9c16611306 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -7,6 +7,109 @@
#include <net/sock.h>
#include <net/af_vsock.h>
+#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))
+
+struct virtio_vsock_skb_cb {
+ bool reply;
+ bool tap_delivered;
+};
+
+#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
+
+static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
+{
+ return (struct virtio_vsock_hdr *)skb->head;
+}
+
+static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
+{
+ return VIRTIO_VSOCK_SKB_CB(skb)->reply;
+}
+
+static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
+{
+ VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
+}
+
+static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
+{
+ return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
+}
+
+static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
+{
+ VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
+}
+
+static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
+{
+ VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
+}
+
+static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
+{
+ u32 len;
+
+ len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
+
+ if (len > 0)
+ skb_put(skb, len);
+}
+
+static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
+{
+ struct sk_buff *skb;
+
+ if (size < VIRTIO_VSOCK_SKB_HEADROOM)
+ return NULL;
+
+ skb = alloc_skb(size, mask);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
+ return skb;
+}
+
+static inline void
+virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
+{
+ spin_lock_bh(&list->lock);
+ __skb_queue_head(list, skb);
+ spin_unlock_bh(&list->lock);
+}
+
+static inline void
+virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
+{
+ spin_lock_bh(&list->lock);
+ __skb_queue_tail(list, skb);
+ spin_unlock_bh(&list->lock);
+}
+
+static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ spin_lock_bh(&list->lock);
+ skb = __skb_dequeue(list);
+ spin_unlock_bh(&list->lock);
+
+ return skb;
+}
+
+static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
+{
+ spin_lock_bh(&list->lock);
+ __skb_queue_purge(list);
+ spin_unlock_bh(&list->lock);
+}
+
+static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
+{
+ return (size_t)(skb_end_pointer(skb) - skb->head);
+}
+
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
@@ -35,23 +138,10 @@ struct virtio_vsock_sock {
u32 last_fwd_cnt;
u32 rx_bytes;
u32 buf_alloc;
- struct list_head rx_queue;
+ struct sk_buff_head rx_queue;
u32 msg_count;
};
-struct virtio_vsock_pkt {
- struct virtio_vsock_hdr hdr;
- struct list_head list;
- /* socket refcnt not held, only use for cancellation */
- struct vsock_sock *vsk;
- void *buf;
- u32 buf_len;
- u32 len;
- u32 off;
- bool reply;
- bool tap_delivered;
-};
-
struct virtio_vsock_pkt_info {
u32 remote_cid, remote_port;
struct vsock_sock *vsk;
@@ -68,7 +158,7 @@ struct virtio_transport {
struct vsock_transport transport;
/* Takes ownership of the packet */
- int (*send_pkt)(struct virtio_vsock_pkt *pkt);
+ int (*send_pkt)(struct sk_buff *skb);
};
ssize_t
@@ -149,11 +239,10 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);
void virtio_transport_recv_pkt(struct virtio_transport *t,
- struct virtio_vsock_pkt *pkt);
-void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
-void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
+ struct sk_buff *skb);
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
-void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt);
-
+void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
+int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
#endif /* _LINUX_VIRTIO_VSOCK_H */
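
The sk_buff conversion replaces struct virtio_vsock_pkt with the helpers added above. A hedged sketch of the expected allocation/queueing pattern (header filling is abbreviated; example_queue_rx is not a function from the patch):

#include <linux/virtio_vsock.h>

static int example_queue_rx(struct virtio_vsock_sock *vvs, u32 payload_len)
{
	struct virtio_vsock_hdr *hdr;
	struct sk_buff *skb;

	/* Header room is reserved by the helper; payload space follows it. */
	skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_SKB_HEADROOM + payload_len,
				     GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = virtio_vsock_hdr(skb);
	hdr->len = cpu_to_le32(payload_len);
	/* ... fill the remaining header fields and copy the payload ... */

	virtio_vsock_skb_rx_put(skb);	/* expose hdr->len bytes of data */
	virtio_vsock_skb_queue_tail(&vvs->rx_queue, skb);
	return 0;
}
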
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 096d48aa3437..69250efa03d1 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -76,6 +76,7 @@ struct vmap_area {
unsigned long subtree_max_size; /* in "free" tree */
struct vm_struct *vm; /* in "busy" tree */
};
+ unsigned long flags; /* mark type of vm_map_ram area */
};
/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 212892cf9822..1991a02c6431 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -92,7 +92,7 @@ struct vringh_iov {
};
/**
- * struct vringh_iov - kvec mangler.
+ * struct vringh_kiov - kvec mangler.
*
* Mangles kvec in place, and restores it.
* Remaining data is iov + i, of used - i elements.
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 06f9291b6fd5..46020373e155 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -207,7 +207,7 @@ static inline void wait_on_inode(struct inode *inode)
#include <linux/cgroup.h>
#include <linux/bio.h>
-void __inode_attach_wb(struct inode *inode, struct page *page);
+void __inode_attach_wb(struct inode *inode, struct folio *folio);
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
struct inode *inode)
__releases(&inode->i_lock);
@@ -222,16 +222,16 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb);
/**
* inode_attach_wb - associate an inode with its wb
* @inode: inode of interest
- * @page: page being dirtied (may be NULL)
+ * @folio: folio being dirtied (may be NULL)
*
* If @inode doesn't have its wb, associate it with the wb matching the
- * memcg of @page or, if @page is NULL, %current. May be called w/ or w/o
+ * memcg of @folio or, if @folio is NULL, %current. May be called w/ or w/o
* @inode->i_lock.
*/
-static inline void inode_attach_wb(struct inode *inode, struct page *page)
+static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
{
if (!inode->i_wb)
- __inode_attach_wb(inode, page);
+ __inode_attach_wb(inode, folio);
}
/**
@@ -290,7 +290,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
#else /* CONFIG_CGROUP_WRITEBACK */
-static inline void inode_attach_wb(struct inode *inode, struct page *page)
+static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
{
}
@@ -366,11 +366,9 @@ int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
bool wb_over_bg_thresh(struct bdi_writeback *wb);
-typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
+typedef int (*writepage_t)(struct folio *folio, struct writeback_control *wbc,
void *data);
-int generic_writepages(struct address_space *mapping,
- struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
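
With generic_writepages() gone and writepage_t now taking a folio, filesystems that used it typically call write_cache_pages() with a folio-based callback. A rough sketch under that assumption (the example_* functions are placeholders and omit real I/O submission):

#include <linux/pagemap.h>
#include <linux/writeback.h>

static int example_write_folio(struct folio *folio,
			       struct writeback_control *wbc, void *data)
{
	/*
	 * A real implementation would mark the folio under writeback and
	 * submit I/O here; the folio arrives locked with its dirty bit
	 * already cleared for I/O.
	 */
	folio_unlock(folio);
	return 0;
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, example_write_folio, NULL);
}
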
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 44dd6d6e01bc..741703b45f61 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1643,7 +1643,8 @@ static inline void xas_set_order(struct xa_state *xas, unsigned long index,
* @update: Function to call when updating a node.
*
* The XArray can notify a caller after it has updated an xa_node.
- * This is advanced functionality and is only needed by the page cache.
+ * This is advanced functionality and is only needed by the page
+ * cache and swap cache.
*/
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
diff --git a/include/media/davinci/ccdc_types.h b/include/media/davinci/ccdc_types.h
deleted file mode 100644
index 971984dc1ce4..000000000000
--- a/include/media/davinci/ccdc_types.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- **************************************************************************/
-#ifndef _CCDC_TYPES_H
-#define _CCDC_TYPES_H
-enum ccdc_pixfmt {
- CCDC_PIXFMT_RAW,
- CCDC_PIXFMT_YCBCR_16BIT,
- CCDC_PIXFMT_YCBCR_8BIT
-};
-
-enum ccdc_frmfmt {
- CCDC_FRMFMT_PROGRESSIVE,
- CCDC_FRMFMT_INTERLACED
-};
-
-/* PIXEL ORDER IN MEMORY from LSB to MSB */
-/* only applicable for 8-bit input mode */
-enum ccdc_pixorder {
- CCDC_PIXORDER_YCBYCR,
- CCDC_PIXORDER_CBYCRY,
-};
-
-enum ccdc_buftype {
- CCDC_BUFTYPE_FLD_INTERLEAVED,
- CCDC_BUFTYPE_FLD_SEPARATED
-};
-#endif
diff --git a/include/media/drv-intf/saa7146.h b/include/media/drv-intf/saa7146.h
new file mode 100644
index 000000000000..71ce63c99cb4
--- /dev/null
+++ b/include/media/drv-intf/saa7146.h
@@ -0,0 +1,472 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SAA7146__
+#define __SAA7146__
+
+#include <linux/delay.h> /* for delay-stuff */
+#include <linux/slab.h> /* for kmalloc/kfree */
+#include <linux/pci.h> /* for pci-config-stuff, vendor ids etc. */
+#include <linux/init.h> /* for "__init" */
+#include <linux/interrupt.h> /* for IMMEDIATE_BH */
+#include <linux/kmod.h> /* for kernel module loader */
+#include <linux/i2c.h> /* for i2c subsystem */
+#include <asm/io.h> /* for accessing devices */
+#include <linux/stringify.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include <linux/vmalloc.h> /* for vmalloc() */
+#include <linux/mm.h> /* for vmalloc_to_page() */
+
+#define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr)))
+#define saa7146_read(sxy,adr) readl(sxy->mem+(adr))
+
+extern unsigned int saa7146_debug;
+
+#ifndef DEBUG_VARIABLE
+ #define DEBUG_VARIABLE saa7146_debug
+#endif
+
+#define ERR(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
+
+#define _DBG(mask, fmt, ...) \
+do { \
+ if (DEBUG_VARIABLE & mask) \
+ pr_debug("%s(): " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+/* simple debug messages */
+#define DEB_S(fmt, ...) _DBG(0x01, fmt, ##__VA_ARGS__)
+/* more detailed debug messages */
+#define DEB_D(fmt, ...) _DBG(0x02, fmt, ##__VA_ARGS__)
+/* print enter and exit of functions */
+#define DEB_EE(fmt, ...) _DBG(0x04, fmt, ##__VA_ARGS__)
+/* i2c debug messages */
+#define DEB_I2C(fmt, ...) _DBG(0x08, fmt, ##__VA_ARGS__)
+/* vbi debug messages */
+#define DEB_VBI(fmt, ...) _DBG(0x10, fmt, ##__VA_ARGS__)
+/* interrupt debug messages */
+#define DEB_INT(fmt, ...) _DBG(0x20, fmt, ##__VA_ARGS__)
+/* capture debug messages */
+#define DEB_CAP(fmt, ...) _DBG(0x40, fmt, ##__VA_ARGS__)
+
+#define SAA7146_ISR_CLEAR(x,y) \
+ saa7146_write(x, ISR, (y));
+
+struct module;
+
+struct saa7146_dev;
+struct saa7146_extension;
+struct saa7146_vv;
+
+/* saa7146 page table */
+struct saa7146_pgtable {
+ unsigned int size;
+ __le32 *cpu;
+ dma_addr_t dma;
+ /* used for offsets for u,v planes for planar capture modes */
+ unsigned long offset;
+ /* used for custom pagetables (used for example by budget dvb cards) */
+ struct scatterlist *slist;
+ int nents;
+};
+
+struct saa7146_pci_extension_data {
+ struct saa7146_extension *ext;
+ void *ext_priv; /* most likely a name string */
+};
+
+#define MAKE_EXTENSION_PCI(x_var, x_vendor, x_device) \
+ { \
+ .vendor = PCI_VENDOR_ID_PHILIPS, \
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7146, \
+ .subvendor = x_vendor, \
+ .subdevice = x_device, \
+ .driver_data = (unsigned long)& x_var, \
+ }
+
+struct saa7146_extension
+{
+ char name[32]; /* name of the device */
+#define SAA7146_USE_I2C_IRQ 0x1
+#define SAA7146_I2C_SHORT_DELAY 0x2
+ int flags;
+
+ /* pairs of subvendor and subdevice ids for
+ supported devices, last entry 0xffff, 0xfff */
+ struct module *module;
+ struct pci_driver driver;
+ const struct pci_device_id *pci_tbl;
+
+ /* extension functions */
+ int (*probe)(struct saa7146_dev *);
+ int (*attach)(struct saa7146_dev *, struct saa7146_pci_extension_data *);
+ int (*detach)(struct saa7146_dev*);
+
+ u32 irq_mask; /* mask indicating which irq events are handled by the extension */
+ void (*irq_func)(struct saa7146_dev*, u32* irq_mask);
+};
+
+struct saa7146_dma
+{
+ dma_addr_t dma_handle;
+ __le32 *cpu_addr;
+};
+
+struct saa7146_dev
+{
+ struct module *module;
+
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* different device locks */
+ spinlock_t slock;
+ struct mutex v4l2_lock;
+
+ unsigned char __iomem *mem; /* pointer to mapped IO memory */
+ u32 revision; /* chip revision; needed for bug-workarounds*/
+
+ /* pci-device & irq stuff*/
+ char name[32];
+ struct pci_dev *pci;
+ u32 int_todo;
+ spinlock_t int_slock;
+
+ /* extension handling */
+ struct saa7146_extension *ext; /* indicates if handled by extension */
+ void *ext_priv; /* pointer for extension private use (most likely some private data) */
+ struct saa7146_ext_vv *ext_vv_data;
+
+ /* per device video/vbi information (if available) */
+ struct saa7146_vv *vv_data;
+ void (*vv_callback)(struct saa7146_dev *dev, unsigned long status);
+
+ /* i2c-stuff */
+ struct mutex i2c_lock;
+
+ u32 i2c_bitrate;
+ struct saa7146_dma d_i2c; /* pointer to i2c memory */
+ wait_queue_head_t i2c_wq;
+ int i2c_op;
+
+ /* memories */
+ struct saa7146_dma d_rps0;
+ struct saa7146_dma d_rps1;
+};
+
+static inline struct saa7146_dev *to_saa7146_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct saa7146_dev, v4l2_dev);
+}
+
+/* from saa7146_i2c.c */
+int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c_adapter, u32 bitrate);
+
+/* from saa7146_core.c */
+int saa7146_register_extension(struct saa7146_extension*);
+int saa7146_unregister_extension(struct saa7146_extension*);
+struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc);
+int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt);
+void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt);
+int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length );
+void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt);
+void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, void *mem, struct saa7146_pgtable *pt);
+void saa7146_setgpio(struct saa7146_dev *dev, int port, u32 data);
+int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop);
+
+/* some memory sizes */
+#define SAA7146_I2C_MEM ( 1*PAGE_SIZE)
+#define SAA7146_RPS_MEM ( 1*PAGE_SIZE)
+
+/* some i2c constants */
+#define SAA7146_I2C_TIMEOUT 100 /* i2c-timeout-value in ms */
+#define SAA7146_I2C_RETRIES 3 /* how many times shall we retry an i2c-operation? */
+#define SAA7146_I2C_DELAY 5 /* time we wait after certain i2c-operations */
+
+/* unsorted defines */
+#define ME1 0x0000000800
+#define PV1 0x0000000008
+
+/* gpio defines */
+#define SAA7146_GPIO_INPUT 0x00
+#define SAA7146_GPIO_IRQHI 0x10
+#define SAA7146_GPIO_IRQLO 0x20
+#define SAA7146_GPIO_IRQHL 0x30
+#define SAA7146_GPIO_OUTLO 0x40
+#define SAA7146_GPIO_OUTHI 0x50
+
+/* debi defines */
+#define DEBINOSWAP 0x000e0000
+
+/* define for the register programming sequencer (rps) */
+#define CMD_NOP 0x00000000 /* No operation */
+#define CMD_CLR_EVENT 0x00000000 /* Clear event */
+#define CMD_SET_EVENT 0x10000000 /* Set signal event */
+#define CMD_PAUSE 0x20000000 /* Pause */
+#define CMD_CHECK_LATE 0x30000000 /* Check late */
+#define CMD_UPLOAD 0x40000000 /* Upload */
+#define CMD_STOP 0x50000000 /* Stop */
+#define CMD_INTERRUPT 0x60000000 /* Interrupt */
+#define CMD_JUMP 0x80000000 /* Jump */
+#define CMD_WR_REG 0x90000000 /* Write (load) register */
+#define CMD_RD_REG 0xa0000000 /* Read (store) register */
+#define CMD_WR_REG_MASK 0xc0000000 /* Write register with mask */
+
+#define CMD_OAN MASK_27
+#define CMD_INV MASK_26
+#define CMD_SIG4 MASK_25
+#define CMD_SIG3 MASK_24
+#define CMD_SIG2 MASK_23
+#define CMD_SIG1 MASK_22
+#define CMD_SIG0 MASK_21
+#define CMD_O_FID_B MASK_14
+#define CMD_E_FID_B MASK_13
+#define CMD_O_FID_A MASK_12
+#define CMD_E_FID_A MASK_11
+
+/* some events and command modifiers for rps1 squarewave generator */
+#define EVT_HS (1<<15) // Source Line Threshold reached
+#define EVT_VBI_B (1<<9) // VSYNC Event
+#define RPS_OAN (1<<27) // 1: OR events, 0: AND events
+#define RPS_INV (1<<26) // Invert (compound) event
+#define GPIO3_MSK 0xFF000000 // GPIO #3 control bits
+
+/* Bit mask constants */
+#define MASK_00 0x00000001 /* Mask value for bit 0 */
+#define MASK_01 0x00000002 /* Mask value for bit 1 */
+#define MASK_02 0x00000004 /* Mask value for bit 2 */
+#define MASK_03 0x00000008 /* Mask value for bit 3 */
+#define MASK_04 0x00000010 /* Mask value for bit 4 */
+#define MASK_05 0x00000020 /* Mask value for bit 5 */
+#define MASK_06 0x00000040 /* Mask value for bit 6 */
+#define MASK_07 0x00000080 /* Mask value for bit 7 */
+#define MASK_08 0x00000100 /* Mask value for bit 8 */
+#define MASK_09 0x00000200 /* Mask value for bit 9 */
+#define MASK_10 0x00000400 /* Mask value for bit 10 */
+#define MASK_11 0x00000800 /* Mask value for bit 11 */
+#define MASK_12 0x00001000 /* Mask value for bit 12 */
+#define MASK_13 0x00002000 /* Mask value for bit 13 */
+#define MASK_14 0x00004000 /* Mask value for bit 14 */
+#define MASK_15 0x00008000 /* Mask value for bit 15 */
+#define MASK_16 0x00010000 /* Mask value for bit 16 */
+#define MASK_17 0x00020000 /* Mask value for bit 17 */
+#define MASK_18 0x00040000 /* Mask value for bit 18 */
+#define MASK_19 0x00080000 /* Mask value for bit 19 */
+#define MASK_20 0x00100000 /* Mask value for bit 20 */
+#define MASK_21 0x00200000 /* Mask value for bit 21 */
+#define MASK_22 0x00400000 /* Mask value for bit 22 */
+#define MASK_23 0x00800000 /* Mask value for bit 23 */
+#define MASK_24 0x01000000 /* Mask value for bit 24 */
+#define MASK_25 0x02000000 /* Mask value for bit 25 */
+#define MASK_26 0x04000000 /* Mask value for bit 26 */
+#define MASK_27 0x08000000 /* Mask value for bit 27 */
+#define MASK_28 0x10000000 /* Mask value for bit 28 */
+#define MASK_29 0x20000000 /* Mask value for bit 29 */
+#define MASK_30 0x40000000 /* Mask value for bit 30 */
+#define MASK_31 0x80000000 /* Mask value for bit 31 */
+
+#define MASK_B0 0x000000ff /* Mask value for byte 0 */
+#define MASK_B1 0x0000ff00 /* Mask value for byte 1 */
+#define MASK_B2 0x00ff0000 /* Mask value for byte 2 */
+#define MASK_B3 0xff000000 /* Mask value for byte 3 */
+
+#define MASK_W0 0x0000ffff /* Mask value for word 0 */
+#define MASK_W1 0xffff0000 /* Mask value for word 1 */
+
+#define MASK_PA 0xfffffffc /* Mask value for physical address */
+#define MASK_PR 0xfffffffe /* Mask value for protection register */
+#define MASK_ER 0xffffffff /* Mask value for the entire register */
+
+#define MASK_NONE 0x00000000 /* No mask */
+
+/* register aliases */
+#define BASE_ODD1 0x00 /* Video DMA 1 registers */
+#define BASE_EVEN1 0x04
+#define PROT_ADDR1 0x08
+#define PITCH1 0x0C
+#define BASE_PAGE1 0x10 /* Video DMA 1 base page */
+#define NUM_LINE_BYTE1 0x14
+
+#define BASE_ODD2 0x18 /* Video DMA 2 registers */
+#define BASE_EVEN2 0x1C
+#define PROT_ADDR2 0x20
+#define PITCH2 0x24
+#define BASE_PAGE2 0x28 /* Video DMA 2 base page */
+#define NUM_LINE_BYTE2 0x2C
+
+#define BASE_ODD3 0x30 /* Video DMA 3 registers */
+#define BASE_EVEN3 0x34
+#define PROT_ADDR3 0x38
+#define PITCH3 0x3C
+#define BASE_PAGE3 0x40 /* Video DMA 3 base page */
+#define NUM_LINE_BYTE3 0x44
+
+#define PCI_BT_V1 0x48 /* Video/FIFO 1 */
+#define PCI_BT_V2 0x49 /* Video/FIFO 2 */
+#define PCI_BT_V3 0x4A /* Video/FIFO 3 */
+#define PCI_BT_DEBI 0x4B /* DEBI */
+#define PCI_BT_A 0x4C /* Audio */
+
+#define DD1_INIT 0x50 /* Init setting of DD1 interface */
+
+#define DD1_STREAM_B 0x54 /* DD1 B video data stream handling */
+#define DD1_STREAM_A 0x56 /* DD1 A video data stream handling */
+
+#define BRS_CTRL 0x58 /* BRS control register */
+#define HPS_CTRL 0x5C /* HPS control register */
+#define HPS_V_SCALE 0x60 /* HPS vertical scale */
+#define HPS_V_GAIN 0x64 /* HPS vertical ACL and gain */
+#define HPS_H_PRESCALE 0x68 /* HPS horizontal prescale */
+#define HPS_H_SCALE 0x6C /* HPS horizontal scale */
+#define BCS_CTRL 0x70 /* BCS control */
+#define CHROMA_KEY_RANGE 0x74
+#define CLIP_FORMAT_CTRL 0x78 /* HPS outputs formats & clipping */
+
+#define DEBI_CONFIG 0x7C
+#define DEBI_COMMAND 0x80
+#define DEBI_PAGE 0x84
+#define DEBI_AD 0x88
+
+#define I2C_TRANSFER 0x8C
+#define I2C_STATUS 0x90
+
+#define BASE_A1_IN 0x94 /* Audio 1 input DMA */
+#define PROT_A1_IN 0x98
+#define PAGE_A1_IN 0x9C
+
+#define BASE_A1_OUT 0xA0 /* Audio 1 output DMA */
+#define PROT_A1_OUT 0xA4
+#define PAGE_A1_OUT 0xA8
+
+#define BASE_A2_IN 0xAC /* Audio 2 input DMA */
+#define PROT_A2_IN 0xB0
+#define PAGE_A2_IN 0xB4
+
+#define BASE_A2_OUT 0xB8 /* Audio 2 output DMA */
+#define PROT_A2_OUT 0xBC
+#define PAGE_A2_OUT 0xC0
+
+#define RPS_PAGE0 0xC4 /* RPS task 0 page register */
+#define RPS_PAGE1 0xC8 /* RPS task 1 page register */
+
+#define RPS_THRESH0 0xCC /* HBI threshold for task 0 */
+#define RPS_THRESH1 0xD0 /* HBI threshold for task 1 */
+
+#define RPS_TOV0 0xD4 /* RPS timeout for task 0 */
+#define RPS_TOV1 0xD8 /* RPS timeout for task 1 */
+
+#define IER 0xDC /* Interrupt enable register */
+
+#define GPIO_CTRL 0xE0 /* GPIO 0-3 register */
+
+#define EC1SSR 0xE4 /* Event cnt set 1 source select */
+#define EC2SSR 0xE8 /* Event cnt set 2 source select */
+#define ECT1R 0xEC /* Event cnt set 1 thresholds */
+#define ECT2R 0xF0 /* Event cnt set 2 thresholds */
+
+#define ACON1 0xF4
+#define ACON2 0xF8
+
+#define MC1 0xFC /* Main control register 1 */
+#define MC2 0x100 /* Main control register 2 */
+
+#define RPS_ADDR0 0x104 /* RPS task 0 address register */
+#define RPS_ADDR1 0x108 /* RPS task 1 address register */
+
+#define ISR 0x10C /* Interrupt status register */
+#define PSR 0x110 /* Primary status register */
+#define SSR 0x114 /* Secondary status register */
+
+#define EC1R 0x118 /* Event counter set 1 register */
+#define EC2R 0x11C /* Event counter set 2 register */
+
+#define PCI_VDP1 0x120 /* Video DMA pointer of FIFO 1 */
+#define PCI_VDP2 0x124 /* Video DMA pointer of FIFO 2 */
+#define PCI_VDP3 0x128 /* Video DMA pointer of FIFO 3 */
+#define PCI_ADP1 0x12C /* Audio DMA pointer of audio out 1 */
+#define PCI_ADP2 0x130 /* Audio DMA pointer of audio in 1 */
+#define PCI_ADP3 0x134 /* Audio DMA pointer of audio out 2 */
+#define PCI_ADP4 0x138 /* Audio DMA pointer of audio in 2 */
+#define PCI_DMA_DDP 0x13C /* DEBI DMA pointer */
+
+#define LEVEL_REP 0x140,
+#define A_TIME_SLOT1 0x180, /* from 180 - 1BC */
+#define A_TIME_SLOT2 0x1C0, /* from 1C0 - 1FC */
+
+/* isr masks */
+#define SPCI_PPEF 0x80000000 /* PCI parity error */
+#define SPCI_PABO 0x40000000 /* PCI access error (target or master abort) */
+#define SPCI_PPED 0x20000000 /* PCI parity error on 'real time data' */
+#define SPCI_RPS_I1 0x10000000 /* Interrupt issued by RPS1 */
+#define SPCI_RPS_I0 0x08000000 /* Interrupt issued by RPS0 */
+#define SPCI_RPS_LATE1 0x04000000 /* RPS task 1 is late */
+#define SPCI_RPS_LATE0 0x02000000 /* RPS task 0 is late */
+#define SPCI_RPS_E1 0x01000000 /* RPS error from task 1 */
+#define SPCI_RPS_E0 0x00800000 /* RPS error from task 0 */
+#define SPCI_RPS_TO1 0x00400000 /* RPS timeout task 1 */
+#define SPCI_RPS_TO0 0x00200000 /* RPS timeout task 0 */
+#define SPCI_UPLD 0x00100000 /* RPS in upload */
+#define SPCI_DEBI_S 0x00080000 /* DEBI status */
+#define SPCI_DEBI_E 0x00040000 /* DEBI error */
+#define SPCI_IIC_S 0x00020000 /* I2C status */
+#define SPCI_IIC_E 0x00010000 /* I2C error */
+#define SPCI_A2_IN 0x00008000 /* Audio 2 input DMA protection / limit */
+#define SPCI_A2_OUT 0x00004000 /* Audio 2 output DMA protection / limit */
+#define SPCI_A1_IN 0x00002000 /* Audio 1 input DMA protection / limit */
+#define SPCI_A1_OUT 0x00001000 /* Audio 1 output DMA protection / limit */
+#define SPCI_AFOU 0x00000800 /* Audio FIFO over- / underflow */
+#define SPCI_V_PE 0x00000400 /* Video protection address */
+#define SPCI_VFOU 0x00000200 /* Video FIFO over- / underflow */
+#define SPCI_FIDA 0x00000100 /* Field ID video port A */
+#define SPCI_FIDB 0x00000080 /* Field ID video port B */
+#define SPCI_PIN3 0x00000040 /* GPIO pin 3 */
+#define SPCI_PIN2 0x00000020 /* GPIO pin 2 */
+#define SPCI_PIN1 0x00000010 /* GPIO pin 1 */
+#define SPCI_PIN0 0x00000008 /* GPIO pin 0 */
+#define SPCI_ECS 0x00000004 /* Event counter 1, 2, 4, 5 */
+#define SPCI_EC3S 0x00000002 /* Event counter 3 */
+#define SPCI_EC0S 0x00000001 /* Event counter 0 */
+
+/* i2c */
+#define SAA7146_I2C_ABORT (1<<7)
+#define SAA7146_I2C_SPERR (1<<6)
+#define SAA7146_I2C_APERR (1<<5)
+#define SAA7146_I2C_DTERR (1<<4)
+#define SAA7146_I2C_DRERR (1<<3)
+#define SAA7146_I2C_AL (1<<2)
+#define SAA7146_I2C_ERR (1<<1)
+#define SAA7146_I2C_BUSY (1<<0)
+
+#define SAA7146_I2C_START (0x3)
+#define SAA7146_I2C_CONT (0x2)
+#define SAA7146_I2C_STOP (0x1)
+#define SAA7146_I2C_NOP (0x0)
+
+#define SAA7146_I2C_BUS_BIT_RATE_6400 (0x500)
+#define SAA7146_I2C_BUS_BIT_RATE_3200 (0x100)
+#define SAA7146_I2C_BUS_BIT_RATE_480 (0x400)
+#define SAA7146_I2C_BUS_BIT_RATE_320 (0x600)
+#define SAA7146_I2C_BUS_BIT_RATE_240 (0x700)
+#define SAA7146_I2C_BUS_BIT_RATE_120 (0x000)
+#define SAA7146_I2C_BUS_BIT_RATE_80 (0x200)
+#define SAA7146_I2C_BUS_BIT_RATE_60 (0x300)
+
+static inline void SAA7146_IER_DISABLE(struct saa7146_dev *x, unsigned y)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&x->int_slock, flags);
+ saa7146_write(x, IER, saa7146_read(x, IER) & ~y);
+ spin_unlock_irqrestore(&x->int_slock, flags);
+}
+
+static inline void SAA7146_IER_ENABLE(struct saa7146_dev *x, unsigned y)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&x->int_slock, flags);
+ saa7146_write(x, IER, saa7146_read(x, IER) | y);
+ spin_unlock_irqrestore(&x->int_slock, flags);
+}
+
+#endif
diff --git a/include/media/drv-intf/saa7146_vv.h b/include/media/drv-intf/saa7146_vv.h
new file mode 100644
index 000000000000..635805fb35e8
--- /dev/null
+++ b/include/media/drv-intf/saa7146_vv.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SAA7146_VV__
+#define __SAA7146_VV__
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/drv-intf/saa7146.h>
+#include <media/videobuf-dma-sg.h>
+
+#define MAX_SAA7146_CAPTURE_BUFFERS 32 /* arbitrary */
+#define BUFFER_TIMEOUT (HZ/2) /* 0.5 seconds */
+
+#define WRITE_RPS0(x) do { \
+ dev->d_rps0.cpu_addr[ count++ ] = cpu_to_le32(x); \
+ } while (0);
+
+#define WRITE_RPS1(x) do { \
+ dev->d_rps1.cpu_addr[ count++ ] = cpu_to_le32(x); \
+ } while (0);
+
+struct saa7146_video_dma {
+ u32 base_odd;
+ u32 base_even;
+ u32 prot_addr;
+ u32 pitch;
+ u32 base_page;
+ u32 num_line_byte;
+};
+
+#define FORMAT_BYTE_SWAP 0x1
+#define FORMAT_IS_PLANAR 0x2
+
+struct saa7146_format {
+ u32 pixelformat;
+ u32 trans;
+ u8 depth;
+ u8 flags;
+ u8 swap;
+};
+
+struct saa7146_standard
+{
+ char *name;
+ v4l2_std_id id;
+
+ int v_offset; /* number of lines of vertical offset before processing */
+ int v_field; /* number of lines in a field for HPS to process */
+
+ int h_offset; /* horizontal offset of processing window */
+ int h_pixels; /* number of horizontal pixels to process */
+
+ int v_max_out;
+ int h_max_out;
+};
+
+/* buffer for one video/vbi frame */
+struct saa7146_buf {
+ /* common v4l buffer stuff -- must be first */
+ struct videobuf_buffer vb;
+
+ /* saa7146 specific */
+ struct v4l2_pix_format *fmt;
+ int (*activate)(struct saa7146_dev *dev,
+ struct saa7146_buf *buf,
+ struct saa7146_buf *next);
+
+ /* page tables */
+ struct saa7146_pgtable pt[3];
+};
+
+struct saa7146_dmaqueue {
+ struct saa7146_dev *dev;
+ struct saa7146_buf *curr;
+ struct list_head queue;
+ struct timer_list timeout;
+};
+
+struct saa7146_overlay {
+ struct saa7146_fh *fh;
+ struct v4l2_window win;
+ struct v4l2_clip clips[16];
+ int nclips;
+};
+
+/* per open data */
+struct saa7146_fh {
+ /* Must be the first field! */
+ struct v4l2_fh fh;
+ struct saa7146_dev *dev;
+
+ /* video capture */
+ struct videobuf_queue video_q;
+
+ /* vbi capture */
+ struct videobuf_queue vbi_q;
+
+ unsigned int resources; /* resource management for device open */
+};
+
+#define STATUS_OVERLAY 0x01
+#define STATUS_CAPTURE 0x02
+
+struct saa7146_vv
+{
+ /* vbi capture */
+ struct saa7146_dmaqueue vbi_dmaq;
+ struct v4l2_vbi_format vbi_fmt;
+ struct timer_list vbi_read_timeout;
+ struct file *vbi_read_timeout_file;
+ /* vbi workaround interrupt queue */
+ wait_queue_head_t vbi_wq;
+ int vbi_fieldcount;
+ struct saa7146_fh *vbi_streaming;
+
+ int video_status;
+ struct saa7146_fh *video_fh;
+
+ /* video overlay */
+ struct saa7146_overlay ov;
+ struct v4l2_framebuffer ov_fb;
+ struct saa7146_format *ov_fmt;
+ struct saa7146_fh *ov_suspend;
+
+ /* video capture */
+ struct saa7146_dmaqueue video_dmaq;
+ struct v4l2_pix_format video_fmt;
+ enum v4l2_field last_field;
+
+ /* common: fixme? shouldn't this be in saa7146_fh?
+ (this leads to a more complicated question: shall the driver
+ store the different settings (for example S_INPUT) for every open
+ and restore it appropriately, or should all settings be common for
+ all opens? currently, we do the latter, like all other
+ drivers do... */
+ struct saa7146_standard *standard;
+
+ int vflip;
+ int hflip;
+ int current_hps_source;
+ int current_hps_sync;
+
+ struct saa7146_dma d_clipping; /* pointer to clipping memory */
+
+ unsigned int resources; /* resource management for device */
+};
+
+/* flags */
+#define SAA7146_USE_PORT_B_FOR_VBI 0x2 /* use input port b for vbi hardware bug workaround */
+
+struct saa7146_ext_vv
+{
+ /* information about the video capabilities of the device */
+ int inputs;
+ int audios;
+ u32 capabilities;
+ int flags;
+
+ /* additionally supported transmission standards */
+ struct saa7146_standard *stds;
+ int num_stds;
+ int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
+
+ /* the extension can override this */
+ struct v4l2_ioctl_ops vid_ops;
+ struct v4l2_ioctl_ops vbi_ops;
+ /* pointer to the saa7146 core ops */
+ const struct v4l2_ioctl_ops *core_ops;
+
+ struct v4l2_file_operations vbi_fops;
+};
+
+struct saa7146_use_ops {
+ void (*init)(struct saa7146_dev *, struct saa7146_vv *);
+ int(*open)(struct saa7146_dev *, struct file *);
+ void (*release)(struct saa7146_dev *, struct file *);
+ void (*irq_done)(struct saa7146_dev *, unsigned long status);
+ ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
+};
+
+/* from saa7146_fops.c */
+int saa7146_register_device(struct video_device *vid, struct saa7146_dev *dev, char *name, int type);
+int saa7146_unregister_device(struct video_device *vid, struct saa7146_dev *dev);
+void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state);
+void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi);
+int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf);
+void saa7146_buffer_timeout(struct timer_list *t);
+void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q,
+ struct saa7146_buf *buf);
+
+int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv);
+int saa7146_vv_release(struct saa7146_dev* dev);
+
+/* from saa7146_hlp.c */
+int saa7146_enable_overlay(struct saa7146_fh *fh);
+void saa7146_disable_overlay(struct saa7146_fh *fh);
+
+void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next);
+void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma) ;
+void saa7146_set_hps_source_and_sync(struct saa7146_dev *saa, int source, int sync);
+void saa7146_set_gpio(struct saa7146_dev *saa, u8 pin, u8 data);
+
+/* from saa7146_video.c */
+extern const struct v4l2_ioctl_ops saa7146_video_ioctl_ops;
+extern const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops;
+extern const struct saa7146_use_ops saa7146_video_uops;
+int saa7146_start_preview(struct saa7146_fh *fh);
+int saa7146_stop_preview(struct saa7146_fh *fh);
+long saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg);
+int saa7146_s_ctrl(struct v4l2_ctrl *ctrl);
+
+/* from saa7146_vbi.c */
+extern const struct saa7146_use_ops saa7146_vbi_uops;
+
+/* resource management functions */
+int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit);
+void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits);
+
+#define RESOURCE_DMA1_HPS 0x1
+#define RESOURCE_DMA2_CLP 0x2
+#define RESOURCE_DMA3_BRS 0x4
+
+/* saa7146 source inputs */
+#define SAA7146_HPS_SOURCE_PORT_A 0x00
+#define SAA7146_HPS_SOURCE_PORT_B 0x01
+#define SAA7146_HPS_SOURCE_YPB_CPA 0x02
+#define SAA7146_HPS_SOURCE_YPA_CPB 0x03
+
+/* sync inputs */
+#define SAA7146_HPS_SYNC_PORT_A 0x00
+#define SAA7146_HPS_SYNC_PORT_B 0x01
+
+/* some memory sizes */
+/* max. 16 clipping rectangles */
+#define SAA7146_CLIPPING_MEM (16 * 4 * sizeof(u32))
+
+/* some defines for the various clipping-modes */
+#define SAA7146_CLIPPING_RECT 0x4
+#define SAA7146_CLIPPING_RECT_INVERTED 0x5
+#define SAA7146_CLIPPING_MASK 0x6
+#define SAA7146_CLIPPING_MASK_INVERTED 0x7
+
+/* output formats: each entry holds four pieces of information */
+#define RGB08_COMPOSED 0x0217 /* composed is used in the sense of "not-planar" */
+/* this means: planar?=0, yuv2rgb-conversion-mode=2, dither=yes(=1), format-mode = 7 */
+#define RGB15_COMPOSED 0x0213
+#define RGB16_COMPOSED 0x0210
+#define RGB24_COMPOSED 0x0201
+#define RGB32_COMPOSED 0x0202
+
+#define Y8 0x0006
+#define YUV411_COMPOSED 0x0003
+#define YUV422_COMPOSED 0x0000
+/* this means: planar?=1, yuv2rgb-conversion-mode=0, dither=no(=0), format-mode = b */
+#define YUV411_DECOMPOSED 0x100b
+#define YUV422_DECOMPOSED 0x1009
+#define YUV420_DECOMPOSED 0x100a
+
+#define IS_PLANAR(x) (x & 0xf000)
+
+/* misc defines */
+#define SAA7146_NO_SWAP (0x0)
+#define SAA7146_TWO_BYTE_SWAP (0x1)
+#define SAA7146_FOUR_BYTE_SWAP (0x2)
+
+#endif
diff --git a/include/media/i2c/s5c73m3.h b/include/media/i2c/s5c73m3.h
deleted file mode 100644
index df0769d64523..000000000000
--- a/include/media/i2c/s5c73m3.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Samsung LSI S5C73M3 8M pixel camera driver
- *
- * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
- * Sylwester Nawrocki <s.nawrocki@samsung.com>
- * Andrzej Hajda <a.hajda@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef MEDIA_S5C73M3__
-#define MEDIA_S5C73M3__
-
-#include <linux/videodev2.h>
-#include <media/v4l2-mediabus.h>
-
-/**
- * struct s5c73m3_platform_data - s5c73m3 driver platform data
- * @mclk_frequency: sensor's master clock frequency in Hz
- * @bus_type: bus type
- * @nlanes: maximum number of MIPI-CSI lanes used
- * @horiz_flip: default horizontal image flip value, non zero to enable
- * @vert_flip: default vertical image flip value, non zero to enable
- */
-
-struct s5c73m3_platform_data {
- unsigned long mclk_frequency;
-
- enum v4l2_mbus_type bus_type;
- u8 nlanes;
- u8 horiz_flip;
- u8 vert_flip;
-};
-
-#endif /* MEDIA_S5C73M3__ */
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 85ed08ddee9d..741f9c629c6f 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -131,6 +131,26 @@ struct media_pipeline_pad {
};
/**
+ * struct media_pipeline_pad_iter - Iterator for media_pipeline_for_each_pad
+ *
+ * @cursor: The current element
+ */
+struct media_pipeline_pad_iter {
+ struct list_head *cursor;
+};
+
+/**
+ * struct media_pipeline_entity_iter - Iterator for media_pipeline_for_each_entity
+ *
+ * @ent_enum: The entity enumeration tracker
+ * @cursor: The current element
+ */
+struct media_pipeline_entity_iter {
+ struct media_entity_enum ent_enum;
+ struct list_head *cursor;
+};
+
+/**
* struct media_link - A link object part of a media graph.
*
* @graph_obj: Embedded structure containing the media object common data
@@ -242,7 +262,9 @@ struct media_pad {
* part of the same pipeline and enabling one of the pads
* means that the other pad will become "locked" and
* doesn't allow configuration changes. pad0 and pad1 are
- * guaranteed to not both be sinks or sources.
+ * guaranteed to not both be sinks or sources. Never call
+ * the .has_pad_interdep() operation directly, always use
+ * media_entity_has_pad_interdep().
* Optional: If the operation isn't implemented all pads
* will be considered as interdependent.
*
@@ -1066,6 +1088,8 @@ int media_entity_get_fwnode_pad(struct media_entity *entity,
* @graph: Media graph structure that will be used to walk the graph
* @mdev: Pointer to the &media_device that contains the object
*
+ * This function is deprecated, use media_pipeline_for_each_pad() instead.
+ *
* The caller is required to hold the media_device graph_mutex during the graph
* walk until the graph state is released.
*
@@ -1078,6 +1102,8 @@ __must_check int media_graph_walk_init(
* media_graph_walk_cleanup - Release resources used by graph walk.
*
* @graph: Media graph structure that will be used to walk the graph
+ *
+ * This function is deprecated, use media_pipeline_for_each_pad() instead.
*/
void media_graph_walk_cleanup(struct media_graph *graph);
@@ -1088,6 +1114,8 @@ void media_graph_walk_cleanup(struct media_graph *graph);
* @graph: Media graph structure that will be used to walk the graph
* @entity: Starting entity
*
+ * This function is deprecated, use media_pipeline_for_each_pad() instead.
+ *
* Before using this function, media_graph_walk_init() must be
* used to allocate resources used for walking the graph. This
* function initializes the graph traversal structure to walk the
@@ -1103,6 +1131,8 @@ void media_graph_walk_start(struct media_graph *graph,
* media_graph_walk_next - Get the next entity in the graph
* @graph: Media graph structure
*
+ * This function is deprecated, use media_pipeline_for_each_pad() instead.
+ *
* Perform a depth-first traversal of the given media entities graph.
*
* The graph structure must have been previously initialized with a call to
@@ -1163,6 +1193,76 @@ void media_pipeline_stop(struct media_pad *pad);
*/
void __media_pipeline_stop(struct media_pad *pad);
+struct media_pad *
+__media_pipeline_pad_iter_next(struct media_pipeline *pipe,
+ struct media_pipeline_pad_iter *iter,
+ struct media_pad *pad);
+
+/**
+ * media_pipeline_for_each_pad - Iterate on all pads in a media pipeline
+ * @pipe: The pipeline
+ * @iter: The iterator (struct media_pipeline_pad_iter)
+ * @pad: The iterator pad
+ *
+ * Iterate on all pads in a media pipeline. This is only valid after the
+ * pipeline has been built with media_pipeline_start() and before it gets
+ * destroyed with media_pipeline_stop().
+ */
+#define media_pipeline_for_each_pad(pipe, iter, pad) \
+ for (pad = __media_pipeline_pad_iter_next((pipe), iter, NULL); \
+ pad != NULL; \
+ pad = __media_pipeline_pad_iter_next((pipe), iter, pad))
+
+/**
+ * media_pipeline_entity_iter_init - Initialize a pipeline entity iterator
+ * @pipe: The pipeline
+ * @iter: The iterator
+ *
+ * This function must be called to initialize the iterator before using it in a
+ * media_pipeline_for_each_entity() loop. The iterator must be destroyed by a
+ * call to media_pipeline_entity_iter_cleanup after the loop (including in code
+ * paths that break from the loop).
+ *
+ * The same iterator can be used in multiple consecutive loops without being
+ * destroyed and reinitialized.
+ *
+ * Return: 0 on success or a negative error code otherwise.
+ */
+int media_pipeline_entity_iter_init(struct media_pipeline *pipe,
+ struct media_pipeline_entity_iter *iter);
+
+/**
+ * media_pipeline_entity_iter_cleanup - Destroy a pipeline entity iterator
+ * @iter: The iterator
+ *
+ * This function must be called to destroy iterators initialized with
+ * media_pipeline_entity_iter_init().
+ */
+void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter);
+
+struct media_entity *
+__media_pipeline_entity_iter_next(struct media_pipeline *pipe,
+ struct media_pipeline_entity_iter *iter,
+ struct media_entity *entity);
+
+/**
+ * media_pipeline_for_each_entity - Iterate on all entities in a media pipeline
+ * @pipe: The pipeline
+ * @iter: The iterator (struct media_pipeline_entity_iter)
+ * @entity: The iterator entity
+ *
+ * Iterate on all entities in a media pipeline. This is only valid after the
+ * pipeline has been built with media_pipeline_start() and before it gets
+ * destroyed with media_pipeline_stop(). The iterator must be initialized with
+ * media_pipeline_entity_iter_init() before iteration, and destroyed with
+ * media_pipeline_entity_iter_cleanup() after (including in code paths that
+ * break from the loop).
+ */
+#define media_pipeline_for_each_entity(pipe, iter, entity) \
+ for (entity = __media_pipeline_entity_iter_next((pipe), iter, NULL); \
+ entity != NULL; \
+ entity = __media_pipeline_entity_iter_next((pipe), iter, entity))
+
/**
* media_pipeline_alloc_start - Mark a pipeline as streaming
* @pad: Starting pad
diff --git a/include/media/ov_16bit_addr_reg_helpers.h b/include/media/ov_16bit_addr_reg_helpers.h
new file mode 100644
index 000000000000..1c60a50bd795
--- /dev/null
+++ b/include/media/ov_16bit_addr_reg_helpers.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * I2C register access helpers for Omnivision OVxxxx image sensors which expect
+ * a 16-bit register address in big-endian format and which have 1-3 byte
+ * wide registers, also in big-endian format (for the multi-byte registers).
+ *
+ * Based on the register helpers from drivers/media/i2c/ov2680.c which is:
+ * Copyright (C) 2018 Linaro Ltd
+ */
+#ifndef __OV_16BIT_ADDR_REG_HELPERS_H
+#define __OV_16BIT_ADDR_REG_HELPERS_H
+
+#include <asm/unaligned.h>
+#include <linux/dev_printk.h>
+#include <linux/i2c.h>
+
+static inline int ov_read_reg(struct i2c_client *client, u16 reg,
+ unsigned int len, u32 *val)
+{
+ u8 addr_buf[2], data_buf[4] = { };
+ struct i2c_msg msgs[2];
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "read error: reg=0x%4x: %d\n", reg, ret);
+ return -EIO;
+ }
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+#define ov_read_reg8(s, r, v) ov_read_reg(s, r, 1, v)
+#define ov_read_reg16(s, r, v) ov_read_reg(s, r, 2, v)
+#define ov_read_reg24(s, r, v) ov_read_reg(s, r, 3, v)
+
+static inline int ov_write_reg(struct i2c_client *client, u16 reg,
+ unsigned int len, u32 val)
+{
+ u8 buf[6];
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2) {
+ dev_err(&client->dev, "write error: reg=0x%4x: %d\n", reg, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+#define ov_write_reg8(s, r, v) ov_write_reg(s, r, 1, v)
+#define ov_write_reg16(s, r, v) ov_write_reg(s, r, 2, v)
+#define ov_write_reg24(s, r, v) ov_write_reg(s, r, 3, v)
+
+static inline int ov_update_reg(struct i2c_client *client, u16 reg, u8 mask, u8 val)
+{
+ u32 readval;
+ int ret;
+
+ ret = ov_read_reg8(client, reg, &readval);
+ if (ret < 0)
+ return ret;
+
+ val = (readval & ~mask) | (val & mask);
+
+ return ov_write_reg8(client, reg, val);
+}
+
+#endif
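
A usage sketch for these helpers in a hypothetical OVxxxx sensor driver; the register addresses and bit layout below are illustrative assumptions, not taken from any real datasheet:

#include <linux/bits.h>
#include <media/ov_16bit_addr_reg_helpers.h>

#define MY_OV_REG_CHIP_ID	0x300a	/* hypothetical 16-bit ID register */
#define MY_OV_REG_STREAM	0x0100	/* hypothetical 8-bit control register */

static int my_ov_start_streaming(struct i2c_client *client)
{
	u32 id;
	int ret;

	ret = ov_read_reg16(client, MY_OV_REG_CHIP_ID, &id);
	if (ret)
		return ret;

	dev_info(&client->dev, "chip id 0x%04x\n", id);

	/* Read-modify-write: set bit 0 without touching the other bits. */
	return ov_update_reg(client, MY_OV_REG_STREAM, BIT(0), BIT(0));
}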
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index f3fe9b6e26d4..7245887ef002 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -346,6 +346,7 @@ enum v4l2_mbus_frame_desc_flags {
* struct v4l2_mbus_frame_desc_entry - media bus frame description structure
*
* @flags: bitmask flags, as defined by &enum v4l2_mbus_frame_desc_flags.
+ * @stream: stream in routing configuration
* @pixelcode: media bus pixel code, valid if @flags
* %FRAME_DESC_FL_BLOB is not set.
* @length: number of octets per frame, valid if @flags
@@ -355,6 +356,7 @@ enum v4l2_mbus_frame_desc_flags {
*/
struct v4l2_mbus_frame_desc_entry {
enum v4l2_mbus_frame_desc_flags flags;
+ u32 stream;
u32 pixelcode;
u32 length;
union {
@@ -702,11 +704,59 @@ struct v4l2_subdev_pad_config {
};
/**
+ * struct v4l2_subdev_stream_config - Used for storing stream configuration.
+ *
+ * @pad: pad number
+ * @stream: stream number
+ * @enabled: has the stream been enabled with v4l2_subdev_enable_streams()
+ * @fmt: &struct v4l2_mbus_framefmt
+ * @crop: &struct v4l2_rect to be used for crop
+ * @compose: &struct v4l2_rect to be used for compose
+ *
+ * This structure stores configuration for a stream.
+ */
+struct v4l2_subdev_stream_config {
+ u32 pad;
+ u32 stream;
+ bool enabled;
+
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+};
+
+/**
+ * struct v4l2_subdev_stream_configs - A collection of stream configs.
+ *
+ * @num_configs: number of entries in @config.
+ * @configs: an array of &struct v4l2_subdev_stream_config.
+ */
+struct v4l2_subdev_stream_configs {
+ u32 num_configs;
+ struct v4l2_subdev_stream_config *configs;
+};
+
+/**
+ * struct v4l2_subdev_krouting - subdev routing table
+ *
+ * @num_routes: number of routes
+ * @routes: &struct v4l2_subdev_route
+ *
+ * This structure contains the routing table for a subdev.
+ */
+struct v4l2_subdev_krouting {
+ unsigned int num_routes;
+ struct v4l2_subdev_route *routes;
+};
+
+/**
* struct v4l2_subdev_state - Used for storing subdev state information.
*
* @_lock: default for 'lock'
* @lock: mutex for the state. May be replaced by the user.
* @pads: &struct v4l2_subdev_pad_config array
+ * @routing: routing table for the subdev
+ * @stream_configs: stream configurations (only for V4L2_SUBDEV_FL_STREAMS)
*
* This structure only needs to be passed to the pad op if the 'which' field
* of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For
@@ -717,6 +767,8 @@ struct v4l2_subdev_state {
struct mutex _lock;
struct mutex *lock;
struct v4l2_subdev_pad_config *pads;
+ struct v4l2_subdev_krouting routing;
+ struct v4l2_subdev_stream_configs stream_configs;
};
/**
@@ -769,6 +821,21 @@ struct v4l2_subdev_state {
* this operation as close as possible to stream on time. The
* operation shall fail if the pad index it has been called on
* is not valid or in case of unrecoverable failures.
+ *
+ * @set_routing: enable or disable data connection routes described in the
+ * subdevice routing table.
+ *
+ * @enable_streams: Enable the streams defined in streams_mask on the given
+ * source pad. Subdevs that implement this operation must use the active
+ * state management provided by the subdev core (enabled through a call to
+ * v4l2_subdev_init_finalize() at initialization time). Do not call
+ * directly, use v4l2_subdev_enable_streams() instead.
+ *
+ * @disable_streams: Disable the streams defined in streams_mask on the given
+ * source pad. Subdevs that implement this operation must use the active
+ * state management provided by the subdev core (enabled through a call to
+ * v4l2_subdev_init_finalize() at initialization time). Do not call
+ * directly, use v4l2_subdev_disable_streams() instead.
*/
struct v4l2_subdev_pad_ops {
int (*init_cfg)(struct v4l2_subdev *sd,
@@ -811,6 +878,16 @@ struct v4l2_subdev_pad_ops {
struct v4l2_mbus_frame_desc *fd);
int (*get_mbus_config)(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_config *config);
+ int (*set_routing)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *route);
+ int (*enable_streams)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask);
+ int (*disable_streams)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask);
};
/**
@@ -885,6 +962,17 @@ struct v4l2_subdev_internal_ops {
* should set this flag.
*/
#define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3)
+/*
+ * Set this flag if this subdev supports multiplexed streams. This means
+ * that the driver supports routing and handles the stream parameter in its
+ * v4l2_subdev_pad_ops handlers. More specifically, this means:
+ *
+ * - Centrally managed subdev active state is enabled
+ * - Legacy pad config is _not_ supported (state->pads is NULL)
+ * - Routing ioctls are available
+ * - Multiple streams per pad are supported
+ */
+#define V4L2_SUBDEV_FL_STREAMS (1U << 4)
struct regulator_bulk_data;
@@ -946,6 +1034,10 @@ struct v4l2_subdev_platform_data {
* @active_state: Active state for the subdev (NULL for subdevs tracking the
* state internally). Initialized by calling
* v4l2_subdev_init_finalize().
+ * @enabled_streams: Bitmask of enabled streams used by
+ * v4l2_subdev_enable_streams() and
+ * v4l2_subdev_disable_streams() helper functions for fallback
+ * cases.
*
* Each instance of a subdev driver should create this struct, either
* stand-alone or embedded in a larger struct.
@@ -993,6 +1085,7 @@ struct v4l2_subdev {
* doesn't support it.
*/
struct v4l2_subdev_state *active_state;
+ u64 enabled_streams;
};
@@ -1218,6 +1311,24 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
int v4l2_subdev_link_validate(struct media_link *link);
/**
+ * v4l2_subdev_has_pad_interdep - MC has_pad_interdep implementation for subdevs
+ *
+ * @entity: pointer to &struct media_entity
+ * @pad0: pad number for the first pad
+ * @pad1: pad number for the second pad
+ *
+ * This function is an implementation of the
+ * media_entity_operations.has_pad_interdep operation for subdevs that
+ * implement the multiplexed streams API (as indicated by the
+ * V4L2_SUBDEV_FL_STREAMS subdev flag).
+ *
+ * It considers two pads interdependent if there is an active route between pad0
+ * and pad1.
+ */
+bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
+ unsigned int pad0, unsigned int pad1);
+
+/**
* __v4l2_subdev_state_alloc - allocate v4l2_subdev_state
*
* @sd: pointer to &struct v4l2_subdev for which the state is being allocated.
@@ -1377,6 +1488,272 @@ v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd)
int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format);
+/**
+ * v4l2_subdev_set_routing() - Set given routing to subdev state
+ * @sd: The subdevice
+ * @state: The subdevice state
+ * @routing: Routing that will be copied to subdev state
+ *
+ * This will release old routing table (if any) from the state, allocate
+ * enough space for the given routing, and copy the routing.
+ *
+ * This can be used from the subdev driver's set_routing op, after validating
+ * the routing.
+ */
+int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ const struct v4l2_subdev_krouting *routing);
+
+struct v4l2_subdev_route *
+__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
+ struct v4l2_subdev_route *route);
+
+/**
+ * for_each_active_route - iterate on all active routes of a routing table
+ * @routing: The routing table
+ * @route: The route iterator
+ */
+#define for_each_active_route(routing, route) \
+ for ((route) = NULL; \
+ ((route) = __v4l2_subdev_next_active_route((routing), (route)));)
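
A small sketch of the iterator, checking whether a given sink pad and stream appear in any active route; the helper name is illustrative:

static bool my_sink_stream_is_routed(const struct v4l2_subdev_krouting *routing,
				     u32 sink_pad, u32 sink_stream)
{
	struct v4l2_subdev_route *route;

	for_each_active_route(routing, route) {
		if (route->sink_pad == sink_pad &&
		    route->sink_stream == sink_stream)
			return true;
	}

	return false;
}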
+
+/**
+ * v4l2_subdev_set_routing_with_fmt() - Set given routing and format to subdev
+ * state
+ * @sd: The subdevice
+ * @state: The subdevice state
+ * @routing: Routing that will be copied to subdev state
+ * @fmt: Format used to initialize all the streams
+ *
+ * This is the same as v4l2_subdev_set_routing(), but additionally initializes
+ * all the streams using the given format.
+ */
+int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing,
+ const struct v4l2_mbus_framefmt *fmt);
+
+/**
+ * v4l2_subdev_state_get_stream_format() - Get pointer to a stream format
+ * @state: subdevice state
+ * @pad: pad id
+ * @stream: stream id
+ *
+ * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad +
+ * stream in the subdev state.
+ *
+ * If the state does not contain the given pad + stream, NULL is returned.
+ */
+struct v4l2_mbus_framefmt *
+v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
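
A brief sketch of how a pad operation might consult the per-stream format; the error handling and use of the media bus code are illustrative:

static int my_get_stream_code(struct v4l2_subdev_state *state,
			      u32 pad, u32 stream, u32 *code)
{
	struct v4l2_mbus_framefmt *fmt;

	fmt = v4l2_subdev_state_get_stream_format(state, pad, stream);
	if (!fmt)
		return -EINVAL;

	*code = fmt->code;

	return 0;
}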
+
+/**
+ * v4l2_subdev_state_get_stream_crop() - Get pointer to a stream crop rectangle
+ * @state: subdevice state
+ * @pad: pad id
+ * @stream: stream id
+ *
+ * This returns a pointer to crop rectangle for the given pad + stream in the
+ * subdev state.
+ *
+ * If the state does not contain the given pad + stream, NULL is returned.
+ */
+struct v4l2_rect *
+v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
+
+/**
+ * v4l2_subdev_state_get_stream_compose() - Get pointer to a stream compose
+ * rectangle
+ * @state: subdevice state
+ * @pad: pad id
+ * @stream: stream id
+ *
+ * This returns a pointer to compose rectangle for the given pad + stream in the
+ * subdev state.
+ *
+ * If the state does not contain the given pad + stream, NULL is returned.
+ */
+struct v4l2_rect *
+v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
+
+/**
+ * v4l2_subdev_routing_find_opposite_end() - Find the opposite stream
+ * @routing: routing used to find the opposite side
+ * @pad: pad id
+ * @stream: stream id
+ * @other_pad: pointer used to return the opposite pad
+ * @other_stream: pointer used to return the opposite stream
+ *
+ * This function uses the routing table to find the pad + stream which is
+ * opposite the given pad + stream.
+ *
+ * @other_pad and/or @other_stream can be NULL if the caller does not need the
+ * value.
+ *
+ * Returns 0 on success, or -EINVAL if no matching route is found.
+ */
+int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
+ u32 pad, u32 stream, u32 *other_pad,
+ u32 *other_stream);
+
+/**
+ * v4l2_subdev_state_get_opposite_stream_format() - Get pointer to opposite
+ * stream format
+ * @state: subdevice state
+ * @pad: pad id
+ * @stream: stream id
+ *
+ * This returns a pointer to &struct v4l2_mbus_framefmt for the pad + stream
+ * that is opposite the given pad + stream in the subdev state.
+ *
+ * If the state does not contain the given pad + stream, NULL is returned.
+ */
+struct v4l2_mbus_framefmt *
+v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
+ u32 pad, u32 stream);
+
+/**
+ * v4l2_subdev_state_xlate_streams() - Translate streams from one pad to another
+ *
+ * @state: Subdevice state
+ * @pad0: The first pad
+ * @pad1: The second pad
+ * @streams: Streams bitmask on the first pad
+ *
+ * Streams on sink pads of a subdev are routed to source pads as expressed in
+ * the subdev state routing table. Stream numbers don't necessarily match on
+ * the sink and source side of a route. This function translates stream numbers
+ * on @pad0, expressed as a bitmask in @streams, to the corresponding streams
+ * on @pad1 using the routing table from the @state. It returns the stream mask
+ * on @pad1, and updates @streams with the streams that have been found in the
+ * routing table.
+ *
+ * @pad0 and @pad1 must be a sink and a source, in any order.
+ *
+ * Return: The bitmask of streams of @pad1 that are routed to @streams on @pad0.
+ */
+u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
+ u32 pad0, u32 pad1, u64 *streams);
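
A hedged sketch: translating the streams requested on a source pad to the corresponding streams on sink pad 0, e.g. to propagate an enable request upstream; the fixed sink pad number is an assumption:

static u64 my_source_to_sink_streams(struct v4l2_subdev_state *state,
				     u32 source_pad, u64 source_streams)
{
	u64 streams = source_streams;

	/* Returns the mask of sink pad 0 streams routed to @source_streams. */
	return v4l2_subdev_state_xlate_streams(state, source_pad, 0, &streams);
}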
+
+/**
+ * enum v4l2_subdev_routing_restriction - Subdevice internal routing restrictions
+ *
+ * @V4L2_SUBDEV_ROUTING_NO_1_TO_N:
+ * an input stream may not be routed to multiple output streams (stream
+ * duplication)
+ * @V4L2_SUBDEV_ROUTING_NO_N_TO_1:
+ * multiple input streams may not be routed to the same output stream
+ * (stream merging)
+ * @V4L2_SUBDEV_ROUTING_NO_STREAM_MIX:
+ * streams on the same pad may not be routed to streams on different pads
+ * @V4L2_SUBDEV_ROUTING_ONLY_1_TO_1:
+ * only non-overlapping 1-to-1 stream routing is allowed (a combination of
+ * @V4L2_SUBDEV_ROUTING_NO_1_TO_N and @V4L2_SUBDEV_ROUTING_NO_N_TO_1)
+ */
+enum v4l2_subdev_routing_restriction {
+ V4L2_SUBDEV_ROUTING_NO_1_TO_N = BIT(0),
+ V4L2_SUBDEV_ROUTING_NO_N_TO_1 = BIT(1),
+ V4L2_SUBDEV_ROUTING_NO_STREAM_MIX = BIT(2),
+ V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 =
+ V4L2_SUBDEV_ROUTING_NO_1_TO_N |
+ V4L2_SUBDEV_ROUTING_NO_N_TO_1,
+};
+
+/**
+ * v4l2_subdev_routing_validate() - Verify that routes comply with driver
+ * constraints
+ * @sd: The subdevice
+ * @routing: Routing to verify
+ * @disallow: Restrictions on routes
+ *
+ * This verifies that the given routing complies with the @disallow constraints.
+ *
+ * Returns 0 on success, error value otherwise.
+ */
+int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
+ const struct v4l2_subdev_krouting *routing,
+ enum v4l2_subdev_routing_restriction disallow);
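
A minimal &v4l2_subdev_pad_ops.set_routing sketch combining validation with v4l2_subdev_set_routing_with_fmt(); the default format values and restriction choice are illustrative assumptions:

static int my_sd_set_routing(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *state,
			     enum v4l2_subdev_format_whence which,
			     struct v4l2_subdev_krouting *routing)
{
	static const struct v4l2_mbus_framefmt def_fmt = {
		.width = 1920,
		.height = 1080,
		.code = MEDIA_BUS_FMT_UYVY8_1X16,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_SRGB,
	};
	int ret;

	/* This sketch only accepts non-overlapping 1:1 routes. */
	ret = v4l2_subdev_routing_validate(sd, routing,
					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
	if (ret)
		return ret;

	/* Store the routing and give every stream the default format. */
	return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &def_fmt);
}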
+
+/**
+ * v4l2_subdev_enable_streams() - Enable streams on a pad
+ * @sd: The subdevice
+ * @pad: The pad
+ * @streams_mask: Bitmask of streams to enable
+ *
+ * This function enables streams on a source @pad of a subdevice. The pad is
+ * identified by its index, while the streams are identified by the
+ * @streams_mask bitmask. This allows enabling multiple streams on a pad at
+ * once.
+ *
+ * Enabling a stream that is already enabled isn't allowed. If @streams_mask
+ * contains an already enabled stream, this function returns -EALREADY without
+ * performing any operation.
+ *
+ * Per-stream enable is only available for subdevs that implement the
+ * .enable_streams() and .disable_streams() operations. For other subdevs, this
+ * function implements a best-effort compatibility by calling the .s_stream()
+ * operation, limited to subdevs that have a single source pad.
+ *
+ * Return:
+ * * 0: Success
+ * * -EALREADY: One of the streams in streams_mask is already enabled
+ * * -EINVAL: The pad index is invalid, or doesn't correspond to a source pad
+ * * -EOPNOTSUPP: Falling back to the legacy .s_stream() operation is
+ * impossible because the subdev has multiple source pads
+ */
+int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
+ u64 streams_mask);
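
A short sketch of a consumer (e.g. a video capture bridge) enabling two streams on the remote subdev's source pad; the pad index and stream numbers are assumptions:

static int my_bridge_start(struct v4l2_subdev *remote_sd)
{
	/* Enable streams 0 and 1 on source pad 1 of the remote subdev. */
	return v4l2_subdev_enable_streams(remote_sd, 1,
					  BIT_ULL(0) | BIT_ULL(1));
}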
+
+/**
+ * v4l2_subdev_disable_streams() - Disable streams on a pad
+ * @sd: The subdevice
+ * @pad: The pad
+ * @streams_mask: Bitmask of streams to disable
+ *
+ * This function disables streams on a source @pad of a subdevice. The pad is
+ * identified by its index, while the streams are identified by the
+ * @streams_mask bitmask. This allows disabling multiple streams on a pad at
+ * once.
+ *
+ * Disabling a stream that is not enabled isn't allowed. If @streams_mask
+ * contains a disabled stream, this function returns -EALREADY without
+ * performing any operation.
+ *
+ * Per-stream disable is only available for subdevs that implement the
+ * .enable_streams() and .disable_streams() operations. For other subdevs, this
+ * function implements a best-effort compatibility by calling the .s_stream()
+ * operation, limited to subdevs that have a single source pad.
+ *
+ * Return:
+ * * 0: Success
+ * * -EALREADY: One of the streams in streams_mask is not enabled
+ * * -EINVAL: The pad index is invalid, or doesn't correspond to a source pad
+ * * -EOPNOTSUPP: Falling back to the legacy .s_stream() operation is
+ * impossible because the subdev has multiple source pads
+ */
+int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
+ u64 streams_mask);
+
+/**
+ * v4l2_subdev_s_stream_helper() - Helper to implement the subdev s_stream
+ * operation using enable_streams and disable_streams
+ * @sd: The subdevice
+ * @enable: Enable or disable streaming
+ *
+ * Subdevice drivers that implement the streams-aware
+ * &v4l2_subdev_pad_ops.enable_streams and &v4l2_subdev_pad_ops.disable_streams
+ * operations can use this helper to implement the legacy
+ * &v4l2_subdev_video_ops.s_stream operation.
+ *
+ * This helper can only be used by subdevs that have a single source pad.
+ *
+ * Return: 0 on success, or a negative error code otherwise.
+ */
+int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable);
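
A wiring sketch for a streams-aware subdev driver; my_sd_enable_streams() and my_sd_disable_streams() are hypothetical driver callbacks:

static const struct v4l2_subdev_pad_ops my_sd_pad_ops = {
	.enable_streams = my_sd_enable_streams,		/* hypothetical */
	.disable_streams = my_sd_disable_streams,	/* hypothetical */
};

static const struct v4l2_subdev_video_ops my_sd_video_ops = {
	/* Legacy s_stream implemented on top of the streams operations. */
	.s_stream = v4l2_subdev_s_stream_helper,
};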
+
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
#endif /* CONFIG_MEDIA_CONTROLLER */
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
index 862eff613dc7..b8fa30fd6b50 100644
--- a/include/memory/renesas-rpc-if.h
+++ b/include/memory/renesas-rpc-if.h
@@ -65,41 +65,15 @@ enum rpcif_type {
struct rpcif {
struct device *dev;
- void __iomem *base;
void __iomem *dirmap;
- struct regmap *regmap;
- struct reset_control *rstc;
size_t size;
- enum rpcif_type type;
- enum rpcif_data_dir dir;
- u8 bus_size;
- u8 xfer_size;
- void *buffer;
- u32 xferlen;
- u32 smcr;
- u32 smadr;
- u32 command; /* DRCMR or SMCMR */
- u32 option; /* DROPR or SMOPR */
- u32 enable; /* DRENR or SMENR */
- u32 dummy; /* DRDMCR or SMDMCR */
- u32 ddr; /* DRDRENR or SMDRENR */
};
int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
-int rpcif_hw_init(struct rpcif *rpc, bool hyperflash);
-void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
+int rpcif_hw_init(struct device *dev, bool hyperflash);
+void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
size_t *len);
-int rpcif_manual_xfer(struct rpcif *rpc);
-ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf);
-
-static inline void rpcif_enable_rpm(struct rpcif *rpc)
-{
- pm_runtime_enable(rpc->dev);
-}
-
-static inline void rpcif_disable_rpm(struct rpcif *rpc)
-{
- pm_runtime_disable(rpc->dev);
-}
+int rpcif_manual_xfer(struct device *dev);
+ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf);
#endif // __RENESAS_RPC_IF_H
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 2a6f443f0ef6..4ae0580b63ca 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -39,7 +39,7 @@ struct tc_action {
struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct tc_cookie __rcu *act_cookie;
+ struct tc_cookie __rcu *user_cookie;
struct tcf_chain __rcu *goto_chain;
u32 tcfa_flags;
u8 hw_stats;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 8d773b042c85..400f8a7d0c3f 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -2156,7 +2156,7 @@ struct hci_cp_le_big_create_sync {
__u8 mse;
__le16 timeout;
__u8 num_bis;
- __u8 bis[0];
+ __u8 bis[];
} __packed;
#define HCI_OP_LE_BIG_TERM_SYNC 0x206c
@@ -2174,7 +2174,7 @@ struct hci_cp_le_setup_iso_path {
__le16 codec_vid;
__u8 delay[3];
__u8 codec_cfg_len;
- __u8 codec_cfg[0];
+ __u8 codec_cfg[];
} __packed;
struct hci_rp_le_setup_iso_path {
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 743f6f59dff8..e18a927669c0 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -109,6 +109,8 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
#define MGMT_SETTING_WIDEBAND_SPEECH 0x00020000
+#define MGMT_SETTING_CIS_CENTRAL 0x00040000
+#define MGMT_SETTING_CIS_PERIPHERAL 0x00080000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 03d4f4deadae..f115b2550309 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1075,7 +1075,6 @@ struct survey_info {
s8 noise;
};
-#define CFG80211_MAX_WEP_KEYS 4
#define CFG80211_MAX_NUM_AKM_SUITES 10
/**
@@ -1099,9 +1098,6 @@ struct survey_info {
* port frames over NL80211 instead of the network interface.
* @control_port_no_preauth: disables pre-auth rx over the nl80211 control
* port for mac80211
- * @wep_keys: static WEP keys, if not NULL points to an array of
- * CFG80211_MAX_WEP_KEYS WEP keys
- * @wep_tx_key: key index (0..3) of the default TX static WEP key
* @psk: PSK (for devices supporting 4-way-handshake offload)
* @sae_pwd: password for SAE authentication (for devices supporting SAE
* offload)
@@ -1134,8 +1130,6 @@ struct cfg80211_crypto_settings {
bool control_port_no_encrypt;
bool control_port_over_nl80211;
bool control_port_no_preauth;
- struct key_params *wep_keys;
- int wep_tx_key;
const u8 *psk;
const u8 *sae_pwd;
u8 sae_pwd_len;
@@ -1322,6 +1316,9 @@ struct cfg80211_unsol_bcast_probe_resp {
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @mbssid_config: AP settings for multiple bssid
+ * @punct_bitmap: Preamble puncturing bitmap. Each bit represents
+ * a 20 MHz channel, lowest bit corresponding to the lowest channel.
+ * Bit set to 1 indicates that the channel is punctured.
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1356,6 +1353,7 @@ struct cfg80211_ap_settings {
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
+ u16 punct_bitmap;
};
/**
@@ -1373,6 +1371,9 @@ struct cfg80211_ap_settings {
* @radar_required: whether radar detection is required on the new channel
* @block_tx: whether transmissions should be blocked while changing
* @count: number of beacons until switch
+ * @punct_bitmap: Preamble puncturing bitmap. Each bit represents
+ * a 20 MHz channel, lowest bit corresponding to the lowest channel.
+ * Bit set to 1 indicates that the channel is punctured.
*/
struct cfg80211_csa_settings {
struct cfg80211_chan_def chandef;
@@ -1385,6 +1386,7 @@ struct cfg80211_csa_settings {
bool radar_required;
bool block_tx;
u8 count;
+ u16 punct_bitmap;
};
/**
@@ -1882,6 +1884,24 @@ struct cfg80211_tid_stats {
* received packet with an FCS error matches the peer MAC address.
* @airtime_link_metric: mesh airtime link metric.
* @connected_to_as: true if mesh STA has a path to authentication server
+ * @mlo_params_valid: Indicates @assoc_link_id and @mld_addr fields are filled
+ * by driver. Drivers use this only in cfg80211_new_sta() calls when AP
+ * MLD's MLME/SME is offload to driver. Drivers won't fill this
+ * information in cfg80211_del_sta_sinfo(), get_station() and
+ * dump_station() callbacks.
+ * @assoc_link_id: Indicates MLO link ID of the AP, with which the station
+ * completed (re)association. This information is filled for both MLO
+ * and non-MLO STA connections when the AP is affiliated with an MLD.
+ * @mld_addr: For MLO STA connection, filled with MLD address of the station.
+ * For non-MLO STA connection, filled with all zeros.
+ * @assoc_resp_ies: IEs from (Re)Association Response.
+ * This is used only when in AP mode with drivers that do not use user
+ * space MLME/SME implementation. The information is provided only for the
+ * cfg80211_new_sta() calls to notify user space of the IEs. Drivers won't
+ * fill this information in cfg80211_del_sta_sinfo(), get_station() and
+ * dump_station() callbacks. User space needs this information to determine
+ * the accepted and rejected affiliated links of the connected station.
+ * @assoc_resp_ies_len: Length of @assoc_resp_ies buffer in octets.
*/
struct station_info {
u64 filled;
@@ -1941,6 +1961,12 @@ struct station_info {
u32 airtime_link_metric;
u8 connected_to_as;
+
+ bool mlo_params_valid;
+ u8 assoc_link_id;
+ u8 mld_addr[ETH_ALEN] __aligned(2);
+ const u8 *assoc_resp_ies;
+ size_t assoc_resp_ies_len;
};
/**
@@ -3606,6 +3632,17 @@ struct cfg80211_pmk_conf {
* the real status code for failures. Used only for the authentication
* response command interface (user space to driver).
* @pmkid: The identifier to refer a PMKSA.
+ * @mld_addr: MLD address of the peer. Used by the authentication request event
+ * interface. Driver indicates this to enable MLO during the authentication
+ * offload to user space. Driver shall look at %NL80211_ATTR_MLO_SUPPORT
+ * flag capability in NL80211_CMD_CONNECT to know whether the user space
+ * supports enabling MLO during the authentication offload.
+ * User space should use the address of the interface (on which the
+ * authentication request event reported) as self MLD address. User space
+ * and driver should use MLD addresses in RA, TA and BSSID fields of
+ * authentication frames sent or received via cfg80211. The driver
+ * translates the MLD addresses to/from link addresses based on the link
+ * chosen for the authentication.
*/
struct cfg80211_external_auth_params {
enum nl80211_external_auth_action action;
@@ -3614,6 +3651,7 @@ struct cfg80211_external_auth_params {
unsigned int key_mgmt_suite;
u16 status;
const u8 *pmkid;
+ u8 mld_addr[ETH_ALEN] __aligned(2);
};
/**
@@ -3868,12 +3906,22 @@ struct cfg80211_pmsr_request {
* the IEs of the remote peer in the event from the host driver and
* the constructed IEs by the user space in the request interface.
* @ie_len: Length of IEs in octets.
+ * @assoc_link_id: MLO link ID of the AP with which the peer requested
+ * (re)association. This will be filled by the driver for both MLO and non-MLO
+ * station connections when the AP is affiliated with an MLD. For non-MLD AP mode, it
+ * will be -1. Used only with OWE update event (driver to user space).
+ * @peer_mld_addr: For MLO connection, MLD address of the peer. For non-MLO
+ * connection, it will be all zeros. This is applicable only when
+ * @assoc_link_id is not -1, i.e., the AP affiliated with an MLD. Used only
+ * with OWE update event (driver to user space).
*/
struct cfg80211_update_owe_info {
u8 peer[ETH_ALEN] __aligned(2);
u16 status;
const u8 *ie;
size_t ie_len;
+ int assoc_link_id;
+ u8 peer_mld_addr[ETH_ALEN] __aligned(2);
};
/**
@@ -4683,13 +4731,14 @@ struct cfg80211_ops {
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
* beaconing mode (AP, IBSS, Mesh, ...).
- * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation
- * before connection.
* @WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK: The device supports bigger kek and kck keys
* @WIPHY_FLAG_SUPPORTS_MLO: This is a temporary flag gating the MLO APIs,
* in order to not have them reachable in normal drivers, until we have
* complete feature/interface combinations/etc. advertisement. No driver
* should set this flag for now.
+ * @WIPHY_FLAG_SUPPORTS_EXT_KCK_32: The device supports 32-byte KCK keys.
+ * @WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER: The device could handle reg notify for
+ * NL80211_REGDOM_SET_BY_DRIVER.
*/
enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
@@ -4702,7 +4751,7 @@ enum wiphy_flags {
WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
WIPHY_FLAG_IBSS_RSN = BIT(8),
WIPHY_FLAG_MESH_AUTH = BIT(10),
- /* use hole at 11 */
+ WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),
/* use hole at 12 */
WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
WIPHY_FLAG_AP_UAPSD = BIT(14),
@@ -4715,7 +4764,7 @@ enum wiphy_flags {
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
- WIPHY_FLAG_HAS_STATIC_WEP = BIT(24),
+ WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER = BIT(24),
};
/**
@@ -6191,6 +6240,19 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
}
/**
+ * ieee80211_is_valid_amsdu - check if subframe lengths of an A-MSDU are valid
+ *
+ * This is used to detect non-standard A-MSDU frames, e.g. the ones generated
+ * by ath10k and ath11k, where the subframe length includes the length of the
+ * mesh control field.
+ *
+ * @skb: The input A-MSDU frame without any headers.
+ * @mesh_hdr: use standard compliant mesh A-MSDU subframe header
+ * Returns: true if subframe header lengths are valid for the @mesh_hdr mode
+ */
+bool ieee80211_is_valid_amsdu(struct sk_buff *skb, bool mesh_hdr);
+
+/**
* ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
*
* Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
@@ -6205,11 +6267,36 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
* @extra_headroom: The hardware extra headroom for SKBs in the @list.
* @check_da: DA to check in the inner ethernet header, or NULL
* @check_sa: SA to check in the inner ethernet header, or NULL
+ * @mesh_control: A-MSDU subframe header includes the mesh control field
*/
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
- const u8 *check_da, const u8 *check_sa);
+ const u8 *check_da, const u8 *check_sa,
+ bool mesh_control);
+
+/**
+ * ieee80211_get_8023_tunnel_proto - get RFC1042 or bridge tunnel encap protocol
+ *
+ * Check for RFC1042 or bridge tunnel header and fetch the encapsulated
+ * protocol.
+ *
+ * @hdr: pointer to the MSDU payload
+ * @proto: destination pointer to store the protocol
+ * Return: true if encapsulation was found
+ */
+bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto);
+
+/**
+ * ieee80211_strip_8023_mesh_hdr - strip mesh header from converted 802.3 frames
+ *
+ * Strip the mesh header, which was left in by ieee80211_data_to_8023() as part
+ * of the MSDU data. Also move any source/destination addresses from the mesh
+ * header to the ethernet header (if present).
+ *
+ * @skb: The 802.3 frame with embedded mesh header
+ */
+int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb);
/**
* cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
@@ -7783,7 +7870,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
/**
* cfg80211_del_sta_sinfo - notify userspace about deletion of a station
* @dev: the netdev
- * @mac_addr: the station's address
+ * @mac_addr: the station's address. For MLD station, MLD address is used.
* @sinfo: the station information/statistics
* @gfp: allocation flags
*/
@@ -7794,7 +7881,7 @@ void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
* cfg80211_del_sta - notify userspace about deletion of a station
*
* @dev: the netdev
- * @mac_addr: the station's address
+ * @mac_addr: the station's address. For MLD station, MLD address is used.
* @gfp: allocation flags
*/
static inline void cfg80211_del_sta(struct net_device *dev,
@@ -8280,13 +8367,14 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* @dev: the device which switched channels
* @chandef: the new channel definition
* @link_id: the link ID for MLO, must be 0 for non-MLO
+ * @punct_bitmap: the new puncturing bitmap
*
* Caller must acquire wdev_lock, therefore must only be called from sleepable
* driver context!
*/
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id);
+ unsigned int link_id, u16 punct_bitmap);
/*
* cfg80211_ch_switch_started_notify - notify channel switch start
@@ -8295,6 +8383,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
* @link_id: the link ID for MLO, must be 0 for non-MLO
* @count: the number of TBTTs until the channel switch happens
* @quiet: whether or not immediate quiet was requested by the AP
+ * @punct_bitmap: the future puncturing bitmap
*
* Inform the userspace about the channel switch that has just
* started, so that it can take appropriate actions (eg. starting
@@ -8303,7 +8392,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
- bool quiet);
+ bool quiet, u16 punct_bitmap);
/**
* ieee80211_operating_class_to_band - convert operating class to band
@@ -8850,12 +8939,11 @@ void cfg80211_bss_flush(struct wiphy *wiphy);
/**
* cfg80211_bss_color_notify - notify about bss color event
* @dev: network device
- * @gfp: allocation flags
* @cmd: the actual event we want to notify
* @count: the number of TBTTs until the color change happens
* @color_bitmap: representations of the colors that the local BSS is aware of
*/
-int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,
+int cfg80211_bss_color_notify(struct net_device *dev,
enum nl80211_commands cmd, u8 count,
u64 color_bitmap);
@@ -8863,13 +8951,11 @@ int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,
* cfg80211_obss_color_collision_notify - notify about bss color collision
* @dev: network device
* @color_bitmap: representations of the colors that the local BSS is aware of
- * @gfp: allocation flags
*/
static inline int cfg80211_obss_color_collision_notify(struct net_device *dev,
- u64 color_bitmap, gfp_t gfp)
+ u64 color_bitmap)
{
- return cfg80211_bss_color_notify(dev, gfp,
- NL80211_CMD_OBSS_COLOR_COLLISION,
+ return cfg80211_bss_color_notify(dev, NL80211_CMD_OBSS_COLOR_COLLISION,
0, color_bitmap);
}
@@ -8883,8 +8969,7 @@ static inline int cfg80211_obss_color_collision_notify(struct net_device *dev,
static inline int cfg80211_color_change_started_notify(struct net_device *dev,
u8 count)
{
- return cfg80211_bss_color_notify(dev, GFP_KERNEL,
- NL80211_CMD_COLOR_CHANGE_STARTED,
+ return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_STARTED,
count, 0);
}
@@ -8896,8 +8981,7 @@ static inline int cfg80211_color_change_started_notify(struct net_device *dev,
*/
static inline int cfg80211_color_change_aborted_notify(struct net_device *dev)
{
- return cfg80211_bss_color_notify(dev, GFP_KERNEL,
- NL80211_CMD_COLOR_CHANGE_ABORTED,
+ return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_ABORTED,
0, 0);
}
@@ -8909,9 +8993,21 @@ static inline int cfg80211_color_change_aborted_notify(struct net_device *dev)
*/
static inline int cfg80211_color_change_notify(struct net_device *dev)
{
- return cfg80211_bss_color_notify(dev, GFP_KERNEL,
+ return cfg80211_bss_color_notify(dev,
NL80211_CMD_COLOR_CHANGE_COMPLETED,
0, 0);
}
+/**
+ * cfg80211_valid_disable_subchannel_bitmap - validate puncturing bitmap
+ * @bitmap: bitmap to be validated
+ * @chandef: channel definition
+ *
+ * Validate the puncturing bitmap.
+ *
+ * Return: %true if the bitmap is valid. %false otherwise.
+ */
+bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
+ const struct cfg80211_chan_def *chandef);
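
A hedged sketch of a driver-side sanity check on the requested puncturing bitmap before applying new AP settings; the surrounding function and the decision to skip a zero bitmap are illustrative:

static int my_check_punct(struct cfg80211_ap_settings *params)
{
	u16 bitmap = params->punct_bitmap;

	if (bitmap &&
	    !cfg80211_valid_disable_subchannel_bitmap(&bitmap,
						      &params->chandef))
		return -EINVAL;

	return 0;
}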
+
#endif /* __NET_CFG80211_H */
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index d09c393d229f..0c2778a836db 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -18,6 +18,8 @@
struct wpan_phy;
struct wpan_phy_cca;
+struct cfg802154_scan_request;
+struct cfg802154_beacon_request;
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
struct ieee802154_llsec_device_key;
@@ -67,6 +69,14 @@ struct cfg802154_ops {
struct wpan_dev *wpan_dev, bool mode);
int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, bool ackreq);
+ int (*trigger_scan)(struct wpan_phy *wpan_phy,
+ struct cfg802154_scan_request *request);
+ int (*abort_scan)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
+ int (*send_beacons)(struct wpan_phy *wpan_phy,
+ struct cfg802154_beacon_request *request);
+ int (*stop_beacons)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
void (*get_llsec_table)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
@@ -241,6 +251,17 @@ static inline void wpan_phy_net_set(struct wpan_phy *wpan_phy, struct net *net)
write_pnet(&wpan_phy->_net, net);
}
+static inline bool ieee802154_chan_is_valid(struct wpan_phy *phy,
+ u8 page, u8 channel)
+{
+ if (page > IEEE802154_MAX_PAGE ||
+ channel > IEEE802154_MAX_CHANNEL ||
+ !(phy->supported.channels[page] & BIT(channel)))
+ return false;
+
+ return true;
+}
+
/**
* struct ieee802154_addr - IEEE802.15.4 device address
* @mode: Address mode from frame header. Can be one of:
@@ -278,6 +299,60 @@ struct ieee802154_coord_desc {
bool gts_permit;
};
+/**
+ * struct cfg802154_scan_request - Scan request
+ *
+ * @type: type of scan to be performed
+ * @page: page on which to perform the scan
+ * @channels: channels in te %page to be scanned
+ * @duration: time spent on each channel, calculated with:
+ * aBaseSuperframeDuration * (2 ^ duration + 1)
+ * @wpan_dev: the wpan device on which to perform the scan
+ * @wpan_phy: the wpan phy on which to perform the scan
+ */
+struct cfg802154_scan_request {
+ enum nl802154_scan_types type;
+ u8 page;
+ u32 channels;
+ u8 duration;
+ struct wpan_dev *wpan_dev;
+ struct wpan_phy *wpan_phy;
+};
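
A worked sketch of the per-channel scan time formula quoted above, in symbol units; 960 is the standard aBaseSuperframeDuration (aBaseSlotDuration * aNumSuperframeSlots) and the helper name is illustrative:

static u32 my_scan_time_symbols(u8 duration)
{
	const u32 aBaseSuperframeDuration = 960;	/* symbols */

	/* aBaseSuperframeDuration * (2^duration + 1) */
	return aBaseSuperframeDuration * ((1U << duration) + 1);
}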
+
+/**
+ * struct cfg802154_beacon_request - Beacon request descriptor
+ *
+ * @interval: interval order n between transmissions, expressed as a multiple
+ * of the superframe duration: aBaseSuperframeDuration * (2^n). If the
+ * interval order is greater than or equal to 15, beacons are not sent out
+ * passively at a fixed rate; instead the device should answer beacon
+ * requests as part of active scan procedures
+ * @wpan_dev: the concerned wpan device
+ * @wpan_phy: the wpan phy this was for
+ */
+struct cfg802154_beacon_request {
+ u8 interval;
+ struct wpan_dev *wpan_dev;
+ struct wpan_phy *wpan_phy;
+};
+
+/**
+ * struct cfg802154_mac_pkt - MAC packet descriptor (beacon/command)
+ * @node: MAC packets to process list member
+ * @skb: the received sk_buff
+ * @sdata: the interface on which @skb was received
+ * @page: page configuration when @skb was received
+ * @channel: channel configuration when @skb was received
+ */
+struct cfg802154_mac_pkt {
+ struct list_head node;
+ struct sk_buff *skb;
+ struct ieee802154_sub_if_data *sdata;
+ u8 page;
+ u8 channel;
+};
+
struct ieee802154_llsec_key_id {
u8 mode;
u8 id;
@@ -447,6 +522,7 @@ static inline const char *wpan_phy_name(struct wpan_phy *phy)
return dev_name(&phy->dev);
}
-void ieee802154_configure_durations(struct wpan_phy *phy);
+void ieee802154_configure_durations(struct wpan_phy *phy,
+ unsigned int page, unsigned int channel);
#endif /* __NET_CFG802154_H */
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 6bc783b7a06c..1338cb92c8e7 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -18,8 +18,10 @@
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
-#include <linux/uaccess.h>
#include <asm/checksum.h>
+#if !defined(_HAVE_ARCH_COPY_AND_CSUM_FROM_USER) || !defined(HAVE_CSUM_COPY_USER)
+#include <linux/uaccess.h>
+#endif
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static __always_inline
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index 8841ab6c2de7..42207fc44660 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -19,18 +19,32 @@ struct dcb_app_type {
u8 dcbx;
};
+u16 dcb_getrewr(struct net_device *dev, struct dcb_app *app);
+int dcb_setrewr(struct net_device *dev, struct dcb_app *app);
+int dcb_delrewr(struct net_device *dev, struct dcb_app *app);
+
int dcb_setapp(struct net_device *, struct dcb_app *);
u8 dcb_getapp(struct net_device *, struct dcb_app *);
int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
+struct dcb_rewr_prio_pcp_map {
+ u16 map[IEEE_8021QAZ_MAX_TCS];
+};
+
+void dcb_getrewr_prio_pcp_mask_map(const struct net_device *dev,
+ struct dcb_rewr_prio_pcp_map *p_map);
+
struct dcb_ieee_app_prio_map {
u64 map[IEEE_8021QAZ_MAX_TCS];
};
void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
struct dcb_ieee_app_prio_map *p_map);
+void dcb_getrewr_prio_dscp_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_prio_map *p_map);
+
struct dcb_ieee_app_dscp_map {
u8 map[64];
};
@@ -113,6 +127,10 @@ struct dcbnl_rtnl_ops {
/* apptrust */
int (*dcbnl_setapptrust)(struct net_device *, u8 *, int);
int (*dcbnl_getapptrust)(struct net_device *, u8 *, int *);
+
+ /* rewrite */
+ int (*dcbnl_setrewr)(struct net_device *dev, struct dcb_app *app);
+ int (*dcbnl_delrewr)(struct net_device *dev, struct dcb_app *app);
};
#endif /* __NET_DCBNL_H__ */
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 6a2e4f21779f..6a942e70e451 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -146,7 +146,6 @@ struct devlink_port {
initialized:1;
struct delayed_work type_warn_dw;
struct list_head reporter_list;
- struct mutex reporters_lock; /* Protects reporter_list */
struct devlink_rate *devlink_rate;
struct devlink_linecard *linecard;
@@ -490,6 +489,10 @@ struct devlink_param_item {
const struct devlink_param *param;
union devlink_param_value driverinit_value;
bool driverinit_value_valid;
+ union devlink_param_value driverinit_value_new; /* Not reachable
+ * until reload.
+ */
+ bool driverinit_value_new_valid;
};
enum devlink_param_generic_id {
@@ -1646,7 +1649,9 @@ static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
{
return devlink_alloc_ns(ops, priv_size, &init_net, dev);
}
-void devlink_set_features(struct devlink *devlink, u64 features);
+
+int devl_register(struct devlink *devlink);
+void devl_unregister(struct devlink *devlink);
void devlink_register(struct devlink *devlink);
void devlink_unregister(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
@@ -1685,9 +1690,9 @@ void devl_rate_nodes_destroy(struct devlink *devlink);
void devlink_port_linecard_set(struct devlink_port *devlink_port,
struct devlink_linecard *linecard);
struct devlink_linecard *
-devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
- const struct devlink_linecard_ops *ops, void *priv);
-void devlink_linecard_destroy(struct devlink_linecard *linecard);
+devl_linecard_create(struct devlink *devlink, unsigned int linecard_index,
+ const struct devlink_linecard_ops *ops, void *priv);
+void devl_linecard_destroy(struct devlink_linecard *linecard);
void devlink_linecard_provision_set(struct devlink_linecard *linecard,
const char *type);
void devlink_linecard_provision_clear(struct devlink_linecard *linecard);
@@ -1766,21 +1771,23 @@ void devl_resource_occ_get_unregister(struct devlink *devlink,
void devlink_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id);
+int devl_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
int devlink_params_register(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
+void devl_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
void devlink_params_unregister(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
-int devlink_param_register(struct devlink *devlink,
- const struct devlink_param *param);
-void devlink_param_unregister(struct devlink *devlink,
- const struct devlink_param *param);
-int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
- union devlink_param_value *init_val);
-int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val);
-void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+int devl_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *val);
+void devl_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val);
+void devl_param_value_changed(struct devlink *devlink, u32 param_id);
struct devlink_region *devl_region_create(struct devlink *devlink,
const struct devlink_region_ops *ops,
u32 region_max_snapshots,
@@ -1863,20 +1870,30 @@ int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
const void *value, u32 value_len);
struct devlink_health_reporter *
-devlink_health_reporter_create(struct devlink *devlink,
- const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+devl_port_health_reporter_create(struct devlink_port *port,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
struct devlink_health_reporter *
devlink_port_health_reporter_create(struct devlink_port *port,
const struct devlink_health_reporter_ops *ops,
u64 graceful_period, void *priv);
+struct devlink_health_reporter *
+devl_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
+
+struct devlink_health_reporter *
+devlink_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
+
void
-devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
+devl_health_reporter_destroy(struct devlink_health_reporter *reporter);
void
-devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter);
+devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
void *
devlink_health_reporter_priv(struct devlink_health_reporter *reporter);
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
index 70539288f995..c0a3ea806cd5 100644
--- a/include/net/dropreason.h
+++ b/include/net/dropreason.h
@@ -71,6 +71,13 @@
FN(DUP_FRAG) \
FN(FRAG_REASM_TIMEOUT) \
FN(FRAG_TOO_FAR) \
+ FN(TCP_MINTTL) \
+ FN(IPV6_BAD_EXTHDR) \
+ FN(IPV6_NDISC_FRAG) \
+ FN(IPV6_NDISC_HOP_LIMIT) \
+ FN(IPV6_NDISC_BAD_CODE) \
+ FN(IPV6_NDISC_BAD_OPTIONS) \
+ FN(IPV6_NDISC_NS_OTHERHOST) \
FNe(MAX)
/**
@@ -313,6 +320,25 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_FRAG_TOO_FAR,
/**
+ * @SKB_DROP_REASON_TCP_MINTTL: ipv4 ttl or ipv6 hoplimit below
+ * the threshold (IP_MINTTL or IPV6_MINHOPCOUNT).
+ */
+ SKB_DROP_REASON_TCP_MINTTL,
+ /** @SKB_DROP_REASON_IPV6_BAD_EXTHDR: Bad IPv6 extension header. */
+ SKB_DROP_REASON_IPV6_BAD_EXTHDR,
+ /** @SKB_DROP_REASON_IPV6_NDISC_FRAG: invalid frag (suppress_frag_ndisc). */
+ SKB_DROP_REASON_IPV6_NDISC_FRAG,
+ /** @SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT: invalid hop limit. */
+ SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT,
+ /** @SKB_DROP_REASON_IPV6_NDISC_BAD_CODE: invalid NDISC icmp6 code. */
+ SKB_DROP_REASON_IPV6_NDISC_BAD_CODE,
+ /** @SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS: invalid NDISC options. */
+ SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS,
+ /** @SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST: NEIGHBOUR SOLICITATION
+ * for another host.
+ */
+ SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST,
+ /**
* @SKB_DROP_REASON_MAX: the maximum of drop reason, which shouldn't be
* used as a real 'reason'
*/
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 96086289aa9b..a15f17a38eca 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -938,6 +938,17 @@ struct dsa_switch_ops {
struct ethtool_ts_info *ts);
/*
+ * ethtool MAC merge layer
+ */
+ int (*get_mm)(struct dsa_switch *ds, int port,
+ struct ethtool_mm_state *state);
+ int (*set_mm)(struct dsa_switch *ds, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+ void (*get_mm_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_mm_stats *stats);
+
+ /*
* DCB ops
*/
int (*port_get_default_prio)(struct dsa_switch *ds, int port);
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 88ff7bb2bb9b..632086b2f644 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -16,7 +16,7 @@ struct dst_ops {
unsigned short family;
unsigned int gc_thresh;
- int (*gc)(struct dst_ops *ops);
+ void (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
unsigned int (*mtu)(const struct dst_entry *);
diff --git a/include/net/flow.h b/include/net/flow.h
index 2f0da4f0318b..bb8651a6eaa7 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -8,12 +8,13 @@
#ifndef _NET_FLOW_H
#define _NET_FLOW_H
-#include <linux/socket.h>
#include <linux/in6.h>
#include <linux/atomic.h>
-#include <net/flow_dissector.h>
+#include <linux/container_of.h>
#include <linux/uidgid.h>
+struct flow_keys;
+
/*
* ifindex generation is per-net namespace, and loopback is
* always the 1st device in ns (see net_dev_init), thus any
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 0400a0ac8a29..118082eae48c 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -228,6 +228,8 @@ void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
struct flow_action_entry {
enum flow_action_id id;
u32 hw_index;
+ unsigned long cookie;
+ u64 miss_cookie;
enum flow_action_hw_stats hw_stats;
action_destr destructor;
void *destructor_priv;
@@ -320,7 +322,7 @@ struct flow_action_entry {
u16 sid;
} pppoe;
};
- struct flow_action_cookie *cookie; /* user defined action cookie */
+ struct flow_action_cookie *user_cookie; /* user defined action cookie */
};
struct flow_action {
@@ -593,6 +595,7 @@ struct flow_cls_common_offload {
struct flow_cls_offload {
struct flow_cls_common_offload common;
enum flow_cls_command command;
+ bool use_act_stats;
unsigned long cookie;
struct flow_rule *rule;
struct flow_stats stats;
@@ -610,6 +613,7 @@ struct flow_offload_action {
enum offload_act_command command;
enum flow_action_id id;
u32 index;
+ unsigned long cookie;
struct flow_stats stats;
struct flow_action action;
};
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 4c33a20ea57f..da8a3e648c7a 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -38,6 +38,42 @@
#include <net/cfg802154.h>
+struct ieee802154_beacon_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 beacon_order:4,
+ superframe_order:4,
+ final_cap_slot:4,
+ battery_life_ext:1,
+ reserved0:1,
+ pan_coordinator:1,
+ assoc_permit:1;
+ u8 gts_count:3,
+ gts_reserved:4,
+ gts_permit:1;
+ u8 pend_short_addr_count:3,
+ reserved1:1,
+ pend_ext_addr_count:3,
+ reserved2:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 assoc_permit:1,
+ pan_coordinator:1,
+ reserved0:1,
+ battery_life_ext:1,
+ final_cap_slot:4,
+ superframe_order:4,
+ beacon_order:4;
+ u8 gts_permit:1,
+ gts_reserved:4,
+ gts_count:3;
+ u8 reserved2:1,
+ pend_ext_addr_count:3,
+ reserved1:1,
+ pend_short_addr_count:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
struct ieee802154_sechdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 level:3,
@@ -93,6 +129,13 @@ enum ieee802154_frame_version {
IEEE802154_MULTIPURPOSE_STD = IEEE802154_2003_STD,
};
+enum ieee802154_addressing_mode {
+ IEEE802154_NO_ADDRESSING,
+ IEEE802154_RESERVED,
+ IEEE802154_SHORT_ADDRESSING,
+ IEEE802154_EXTENDED_ADDRESSING,
+};
+
struct ieee802154_hdr {
struct ieee802154_hdr_fc fc;
u8 seq;
@@ -101,6 +144,11 @@ struct ieee802154_hdr {
struct ieee802154_sechdr sec;
};
+struct ieee802154_beacon_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_beacon_hdr mac_pl;
+};
+
/* pushes hdr onto the skb. fields of hdr->fc that can be calculated from
* the contents of hdr will be, and the actual value of those bits in
* hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
@@ -126,6 +174,10 @@ int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
*/
int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
+/* pushes a beacon frame into an skb */
+int ieee802154_beacon_push(struct sk_buff *skb,
+ struct ieee802154_beacon_frame *beacon);
+
int ieee802154_max_payload(const struct ieee802154_hdr *hdr);
static inline int
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index bf5654ce711e..51857117ac09 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -249,6 +249,10 @@ struct inet_sock {
__be32 mc_addr;
struct ip_mc_socklist __rcu *mc_list;
struct inet_cork_full cork;
+ struct {
+ __u16 lo;
+ __u16 hi;
+ } local_port_range;
};
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
diff --git a/include/net/ip.h b/include/net/ip.h
index 144bdfbb25af..c3fffaa92d6e 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -340,7 +340,8 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
} \
}
-void inet_get_local_port_range(struct net *net, int *low, int *high);
+void inet_get_local_port_range(const struct net *net, int *low, int *high);
+void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 035d61d50a98..81ee387a1fc4 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -84,10 +84,6 @@ struct dst_entry *ip6_route_input_lookup(struct net *net,
struct flowi6 *fl6,
const struct sk_buff *skb, int flags);
-struct dst_entry *ip6_route_output_flags_noref(struct net *net,
- const struct sock *sk,
- struct flowi6 *fl6, int flags);
-
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index c6c61100d244..6d71a5ff52df 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -461,6 +461,7 @@ void ip_vs_stats_free(struct ip_vs_stats *stats);
/* Multiple chains processed in same tick */
struct ip_vs_est_tick_data {
+ struct rcu_head rcu_head;
struct hlist_head chains[IPVS_EST_TICK_CHAINS];
DECLARE_BITMAP(present, IPVS_EST_TICK_CHAINS);
DECLARE_BITMAP(full, IPVS_EST_TICK_CHAINS);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 03f3af02a9a6..7332296eca44 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -436,7 +436,8 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
atomic_dec(&fl->users);
}
-void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
+ u8 code, __be32 info);
void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct icmp6hdr *thdr, int len);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index e3235b9c02c2..219fd15893b0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -340,7 +340,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
* @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
* status changed.
- *
+ * @BSS_CHANGED_EHT_PUNCTURING: The channel puncturing bitmap changed.
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -375,6 +375,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
BSS_CHANGED_FILS_DISCOVERY = 1<<30,
BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
+ BSS_CHANGED_EHT_PUNCTURING = BIT_ULL(32),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -640,9 +641,11 @@ struct ieee80211_fils_discovery {
* @tx_pwr_env_num: number of @tx_pwr_env.
* @pwr_reduction: power constraint of BSS.
* @eht_support: does this BSS support EHT
+ * @eht_puncturing: bitmap to indicate which channels are punctured in this BSS
* @csa_active: marks whether a channel switch is going on. Internally it is
* write-protected by sdata_lock and local->mtx so holding either is fine
* for read access.
+ * @csa_punct_bitmap: new puncturing bitmap for channel switch
* @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
* when it is not assigned. This pointer is RCU-protected due to the TX
@@ -653,6 +656,23 @@ struct ieee80211_fils_discovery {
* write-protected by sdata_lock and local->mtx so holding either is fine
* for read access.
* @color_change_color: the bss color that will be used after the change.
+ * @vht_su_beamformer: in AP mode, does this BSS support operation as a VHT SU
+ * beamformer
+ * @vht_su_beamformee: in AP mode, does this BSS support operation as a VHT SU
+ * beamformee
+ * @vht_mu_beamformer: in AP mode, does this BSS support operation as a VHT MU
+ * beamformer
+ * @vht_mu_beamformee: in AP mode, does this BSS support operation as a VHT MU
+ * beamformee
+ * @he_su_beamformer: in AP mode, does this BSS support operation as an HE SU
+ * beamformer
+ * @he_su_beamformee: in AP mode, does this BSS support operation as an HE SU
+ * beamformee
+ * @he_mu_beamformer: in AP mode, does this BSS support operation as an HE MU
+ * beamformer
+ * @he_full_ul_mumimo: does this BSS support the reception (AP) or transmission
+ * (non-AP STA) of an HE TB PPDU on an RU that spans the entire PPDU
+ * bandwidth
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -719,13 +739,25 @@ struct ieee80211_bss_conf {
u8 tx_pwr_env_num;
u8 pwr_reduction;
bool eht_support;
+ u16 eht_puncturing;
bool csa_active;
+ u16 csa_punct_bitmap;
+
bool mu_mimo_owner;
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
bool color_change_active;
u8 color_change_color;
+
+ bool vht_su_beamformer;
+ bool vht_su_beamformee;
+ bool vht_mu_beamformer;
+ bool vht_mu_beamformee;
+ bool he_su_beamformer;
+ bool he_su_beamformee;
+ bool he_mu_beamformer;
+ bool he_full_ul_mumimo;
};
/**
@@ -1436,6 +1468,7 @@ enum mac80211_rx_encoding {
RX_ENC_HT,
RX_ENC_VHT,
RX_ENC_HE,
+ RX_ENC_EHT,
};
/**
@@ -1469,7 +1502,7 @@ enum mac80211_rx_encoding {
* @antenna: antenna used
* @rate_idx: index of data rate into band's supported rates or MCS index if
* HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
- * @nss: number of streams (VHT and HE only)
+ * @nss: number of streams (VHT, HE and EHT only)
* @flag: %RX_FLAG_\*
* @encoding: &enum mac80211_rx_encoding
* @bw: &enum rate_info_bw
@@ -1477,6 +1510,9 @@ enum mac80211_rx_encoding {
* @he_ru: HE RU, from &enum nl80211_he_ru_alloc
* @he_gi: HE GI, from &enum nl80211_he_gi
* @he_dcm: HE DCM value
+ * @eht: EHT specific rate information
+ * @eht.ru: EHT RU, from &enum nl80211_eht_ru_alloc
+ * @eht.gi: EHT GI, from &enum nl80211_eht_gi
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
@@ -1498,8 +1534,18 @@ struct ieee80211_rx_status {
u32 flag;
u16 freq: 13, freq_offset: 1;
u8 enc_flags;
- u8 encoding:2, bw:3, he_ru:3;
- u8 he_gi:2, he_dcm:1;
+ u8 encoding:3, bw:4;
+ union {
+ struct {
+ u8 he_ru:3;
+ u8 he_gi:2;
+ u8 he_dcm:1;
+ };
+ struct {
+ u8 ru:4;
+ u8 gi:2;
+ } eht;
+ };
u8 rate_idx;
u8 nss;
u8 rx_flags;
@@ -5884,9 +5930,6 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
* This function iterates over the interfaces associated with a given
* hardware that are currently active and calls the callback for them.
* This version can only be used while holding the wiphy mutex.
- * The driver must not call this with a lock held that it can also take in
- * response to callbacks from mac80211, and it must not call this within
- * callbacks made by mac80211 - both would result in deadlocks.
*
* @hw: the hardware struct of which the interfaces should be iterated over
* @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
@@ -5901,24 +5944,6 @@ void ieee80211_iterate_active_interfaces_mtx(struct ieee80211_hw *hw,
void *data);
/**
- * ieee80211_iterate_stations - iterate stations
- *
- * This function iterates over all stations associated with a given
- * hardware that are currently uploaded to the driver and calls the callback
- * function for them.
- * This function allows the iterator function to sleep, when the iterator
- * function is atomic @ieee80211_iterate_stations_atomic can be used.
- *
- * @hw: the hardware struct of which the interfaces should be iterated over
- * @iterator: the iterator function to call, cannot sleep
- * @data: first argument of the iterator function
- */
-void ieee80211_iterate_stations(struct ieee80211_hw *hw,
- void (*iterator)(void *data,
- struct ieee80211_sta *sta),
- void *data);
-
-/**
* ieee80211_iterate_stations_atomic - iterate stations
*
* This function iterates over all stations associated with a given
@@ -7191,7 +7216,7 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
/**
- * ieeee80211_obss_color_collision_notify - notify userland about a BSS color
+ * ieee80211_obss_color_collision_notify - notify userland about a BSS color
* collision.
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -7200,8 +7225,8 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
* @gfp: allocation flags
*/
void
-ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
- u64 color_bitmap, gfp_t gfp);
+ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
+ u64 color_bitmap, gfp_t gfp);
/**
* ieee80211_is_tx_data - check if frame is a data frame
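With encoding widened to 3 bits, bw to 4 bits and the new eht sub-struct, a driver can now report EHT receive rates through ieee80211_rx_status. A minimal sketch; the numeric values are placeholders, not taken from any driver:

static void fill_eht_rx_status(struct ieee80211_rx_status *status)
{
	status->encoding = RX_ENC_EHT;
	status->rate_idx = 5;	/* EHT-MCS index, placeholder */
	status->nss = 2;
	status->eht.gi = 1;	/* value from enum nl80211_eht_gi, placeholder */
	status->eht.ru = 0;	/* value from enum nl80211_eht_ru_alloc, placeholder */
}
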
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index da7eec8669ec..07e5168cdaf9 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -445,7 +445,7 @@ int ndisc_late_init(void);
void ndisc_late_cleanup(void);
void ndisc_cleanup(void);
-int ndisc_rcv(struct sk_buff *skb);
+enum skb_drop_reason ndisc_rcv(struct sk_buff *skb);
struct sk_buff *ndisc_ns_create(struct net_device *dev, const struct in6_addr *solicit,
const struct in6_addr *saddr, u64 nonce);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 6a2019aaa464..a72028dbef0c 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -126,6 +126,12 @@ struct nf_conn {
};
static inline struct nf_conn *
+nf_ct_to_nf_conn(const struct nf_conntrack *nfct)
+{
+ return container_of(nfct, struct nf_conn, ct_general);
+}
+
+static inline struct nf_conn *
nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
{
return container_of(hash, struct nf_conn,
@@ -175,6 +181,8 @@ nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
void nf_ct_destroy(struct nf_conntrack *nfct);
+void nf_conntrack_tcp_set_closing(struct nf_conn *ct);
+
/* decrement reference count on a conntrack */
static inline void nf_ct_put(struct nf_conn *ct)
{
@@ -362,6 +370,10 @@ static inline struct nf_conntrack_net *nf_ct_pernet(const struct net *net)
return net_generic(net, nf_conntrack_net_id);
}
+int nf_ct_skb_network_trim(struct sk_buff *skb, int family);
+int nf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ u16 zone, u8 family, u8 *proto, u16 *mru);
+
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index cd982f4a0f50..ebb28ec5b6fa 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -57,7 +57,7 @@ struct nf_flowtable_type {
struct net_device *dev,
enum flow_block_command cmd);
int (*action)(struct net *net,
- const struct flow_offload *flow,
+ struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
@@ -164,6 +164,8 @@ enum nf_flow_flags {
NF_FLOW_HW_DYING,
NF_FLOW_HW_DEAD,
NF_FLOW_HW_PENDING,
+ NF_FLOW_HW_BIDIRECTIONAL,
+ NF_FLOW_HW_ESTABLISHED,
};
enum flow_offload_type {
@@ -312,10 +314,10 @@ void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
struct net_device *dev,
enum flow_block_command cmd);
-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
-int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
+int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 3e825381ac5c..780a5f6ad4a6 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -61,6 +61,16 @@ struct nft_immediate_expr {
extern const struct nft_expr_ops nft_cmp_fast_ops;
extern const struct nft_expr_ops nft_cmp16_fast_ops;
+struct nft_ct {
+ enum nft_ct_keys key:8;
+ enum ip_conntrack_dir dir:8;
+ u8 len;
+ union {
+ u8 dreg;
+ u8 sreg;
+ };
+};
+
struct nft_payload {
enum nft_payload_bases base:8;
u8 offset;
@@ -140,6 +150,8 @@ void nft_rt_get_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
void nft_counter_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
+void nft_ct_get_fast_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
enum {
NFT_PAYLOAD_CTX_INNER_TUN = (1 << 0),
@@ -164,4 +176,8 @@ void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt,
struct nft_inner_tun_ctx *ctx);
+void nft_objref_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+void nft_objref_map_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 112708f7a6b4..947973623dc7 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -29,7 +29,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
if (iph->ihl < 5 || iph->version != 4)
return -1;
- len = ntohs(iph->tot_len);
+ len = iph_totlen(pkt->skb, iph);
thoff = iph->ihl * 4;
if (pkt->skb->len < len)
return -1;
@@ -64,7 +64,7 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt)
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
- len = ntohs(iph->tot_len);
+ len = iph_totlen(pkt->skb, iph);
thoff = iph->ihl * 4;
if (pkt->skb->len < len) {
__IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS);
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 6e1e670e06bc..b12cd957abb4 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -276,7 +276,8 @@ enum nla_policy_validation {
* Note that in the interest of code simplicity and
* struct size both limits are s16, so you cannot
* enforce a range that doesn't fall within the range
- * of s16 - do that as usual in the code instead.
+ * of s16 - do that using the NLA_POLICY_FULL_RANGE()
+ * or NLA_POLICY_FULL_RANGE_SIGNED() macros instead.
* Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and
* NLA_POLICY_RANGE() macros.
* NLA_U8,
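The reworded note points to NLA_POLICY_FULL_RANGE()/NLA_POLICY_FULL_RANGE_SIGNED() for limits that do not fit in s16. A minimal sketch; the MY_ATTR_* names are hypothetical:

static const struct netlink_range_validation my_rate_range = {
	.min = 0,
	.max = 10000000,	/* wider than an s16 policy range allows */
};

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_RATE] = NLA_POLICY_FULL_RANGE(NLA_U32, &my_rate_range),
};
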
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index e1290c159184..1f463b3957c7 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -95,7 +95,6 @@ struct nf_ip_net {
struct netns_ct {
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- u8 ctnetlink_has_listener;
bool ecache_dwork_pending;
#endif
u8 sysctl_log_invalid; /* Log invalid packets */
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 8249060cf5d0..a91ef9f8de60 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -6,6 +6,7 @@
struct ctl_table_header;
struct prot_inuse;
+struct cpumask;
struct netns_core {
/* core sysctls */
@@ -17,6 +18,10 @@ struct netns_core {
#ifdef CONFIG_PROC_FS
struct prot_inuse __percpu *prot_inuse;
#endif
+
+#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
+ struct cpumask *rps_default_mask;
+#endif
};
#endif
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index b79a89d5207c..8cd9d141f5af 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -73,6 +73,11 @@ enum nl802154_commands {
NL802154_CMD_DEL_SEC_LEVEL,
NL802154_CMD_SCAN_EVENT,
+ NL802154_CMD_TRIGGER_SCAN,
+ NL802154_CMD_ABORT_SCAN,
+ NL802154_CMD_SCAN_DONE,
+ NL802154_CMD_SEND_BEACONS,
+ NL802154_CMD_STOP_BEACONS,
/* add new commands above here */
@@ -134,6 +139,14 @@ enum nl802154_attrs {
NL802154_ATTR_NETNS_FD,
NL802154_ATTR_COORDINATOR,
+ NL802154_ATTR_SCAN_TYPE,
+ NL802154_ATTR_SCAN_FLAGS,
+ NL802154_ATTR_SCAN_CHANNELS,
+ NL802154_ATTR_SCAN_PREAMBLE_CODES,
+ NL802154_ATTR_SCAN_MEAN_PRF,
+ NL802154_ATTR_SCAN_DURATION,
+ NL802154_ATTR_SCAN_DONE_REASON,
+ NL802154_ATTR_BEACON_INTERVAL,
/* add attributes here, update the policy in nl802154.c */
@@ -260,6 +273,54 @@ enum nl802154_coord {
};
/**
+ * enum nl802154_scan_types - Scan types
+ *
+ * @__NL802154_SCAN_INVALID: scan type number 0 is reserved
+ * @NL802154_SCAN_ED: An ED scan allows a device to obtain a measure of the peak
+ * energy in each requested channel
+ * @NL802154_SCAN_ACTIVE: Locate any coordinator transmitting Beacon frames using
+ * a Beacon Request command
+ * @NL802154_SCAN_PASSIVE: Locate any coordinator transmitting Beacon frames
+ * @NL802154_SCAN_ORPHAN: Relocate coordinator following a loss of synchronisation
+ * @NL802154_SCAN_ENHANCED_ACTIVE: Same as Active using Enhanced Beacon Request
+ * command instead of Beacon Request command
+ * @NL802154_SCAN_RIT_PASSIVE: Passive scan for RIT Data Request command frames
+ * instead of Beacon frames
+ * @NL802154_SCAN_ATTR_MAX: Maximum SCAN attribute number
+ */
+enum nl802154_scan_types {
+ __NL802154_SCAN_INVALID,
+ NL802154_SCAN_ED,
+ NL802154_SCAN_ACTIVE,
+ NL802154_SCAN_PASSIVE,
+ NL802154_SCAN_ORPHAN,
+ NL802154_SCAN_ENHANCED_ACTIVE,
+ NL802154_SCAN_RIT_PASSIVE,
+
+ /* keep last */
+ NL802154_SCAN_ATTR_MAX,
+};
+
+/**
+ * enum nl802154_scan_done_reasons - End of scan reasons
+ *
+ * @__NL802154_SCAN_DONE_REASON_INVALID: scan done reason number 0 is reserved.
+ * @NL802154_SCAN_DONE_REASON_FINISHED: The scan just finished naturally after
+ * going through all the requested and possible (complex) channels.
+ * @NL802154_SCAN_DONE_REASON_ABORTED: The scan was aborted upon user request.
+ * @NL802154_SCAN_DONE_REASON_MAX: Maximum scan done reason attribute number.
+ */
+enum nl802154_scan_done_reasons {
+ __NL802154_SCAN_DONE_REASON_INVALID,
+ NL802154_SCAN_DONE_REASON_FINISHED,
+ NL802154_SCAN_DONE_REASON_ABORTED,
+
+ /* keep last */
+ NL802154_SCAN_DONE_REASON_MAX,
+};
+
+/**
* enum nl802154_cca_modes - cca modes
*
* @__NL802154_CCA_INVALID: cca mode number 0 is reserved
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 813c93499f20..ddfa0b328677 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -277,6 +277,16 @@ void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size,
bool allow_direct);
+/* pp_frag_count represents the number of writers who can update the page
+ * either by updating skb->data or via DMA mappings for the device.
+ * We can't rely on the page refcnt for that as we don't know who might be
+ * holding page references and we can't reliably destroy or sync DMA mappings
+ * of the fragments.
+ *
+ * When pp_frag_count reaches 0 we can either recycle the page if the page
+ * refcnt is 1 or return it back to the memory allocator and destroy any
+ * mappings we have.
+ */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
atomic_long_set(&page->pp_frag_count, nr);
@@ -386,7 +396,7 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
static inline void page_pool_ring_lock(struct page_pool *pool)
__acquires(&pool->ring.producer_lock)
{
- if (in_serving_softirq())
+ if (in_softirq())
spin_lock(&pool->ring.producer_lock);
else
spin_lock_bh(&pool->ring.producer_lock);
@@ -395,7 +405,7 @@ static inline void page_pool_ring_lock(struct page_pool *pool)
static inline void page_pool_ring_unlock(struct page_pool *pool)
__releases(&pool->ring.producer_lock)
{
- if (in_serving_softirq())
+ if (in_softirq())
spin_unlock(&pool->ring.producer_lock);
else
spin_unlock_bh(&pool->ring.producer_lock);
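The new comment documents the pp_frag_count contract; in practice the allocating driver declares the number of users right after getting the page. A minimal sketch, assuming page_pool_dev_alloc_pages() from elsewhere in this header:

static struct page *rx_alloc_shared_page(struct page_pool *pool)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return NULL;

	/* Two independent RX fragments will reference this page. */
	page_pool_fragment_page(page, 2);
	return page;
}
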
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 4cabb32a2ad9..b3b5b0b62f16 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -59,6 +59,8 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei);
+int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
+ int police, struct tcf_proto *tp, u32 handle, bool used_action_miss);
static inline bool tcf_block_shared(struct tcf_block *block)
{
@@ -229,6 +231,7 @@ struct tcf_exts {
struct tc_action **actions;
struct net *net;
netns_tracker ns_tracker;
+ struct tcf_exts_miss_cookie_node *miss_cookie_node;
#endif
/* Map to export classifier specific extension TLV types to the
* generic extensions API. Unsupported extensions must be set to 0.
@@ -240,21 +243,11 @@ struct tcf_exts {
static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
int action, int police)
{
-#ifdef CONFIG_NET_CLS_ACT
- exts->type = 0;
- exts->nr_actions = 0;
- /* Note: we do not own yet a reference on net.
- * This reference might be taken later from tcf_exts_get_net().
- */
- exts->net = net;
- exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
- GFP_KERNEL);
- if (!exts->actions)
- return -ENOMEM;
+#ifdef CONFIG_NET_CLS
+ return tcf_exts_init_ex(exts, net, action, police, NULL, 0, false);
+#else
+ return -EOPNOTSUPP;
#endif
- exts->action = action;
- exts->police = police;
- return 0;
}
/* Return false if the netns is being destroyed in cleanup_net(). Callers
@@ -292,10 +285,15 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
#define tcf_act_for_each_action(i, a, actions) \
for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)
+static inline bool tc_act_in_hw(struct tc_action *act)
+{
+ return !!act->in_hw_count;
+}
+
static inline void
tcf_exts_hw_stats_update(const struct tcf_exts *exts,
- u64 bytes, u64 packets, u64 drops, u64 lastuse,
- u8 used_hw_stats, bool used_hw_stats_valid)
+ struct flow_stats *stats,
+ bool use_act_stats)
{
#ifdef CONFIG_NET_CLS_ACT
int i;
@@ -303,16 +301,18 @@ tcf_exts_hw_stats_update(const struct tcf_exts *exts,
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- /* if stats from hw, just skip */
- if (tcf_action_update_hw_stats(a)) {
- preempt_disable();
- tcf_action_stats_update(a, bytes, packets, drops,
- lastuse, true);
- preempt_enable();
-
- a->used_hw_stats = used_hw_stats;
- a->used_hw_stats_valid = used_hw_stats_valid;
+ if (use_act_stats || tc_act_in_hw(a)) {
+ if (!tcf_action_update_hw_stats(a))
+ continue;
}
+
+ preempt_disable();
+ tcf_action_stats_update(a, stats->bytes, stats->pkts, stats->drops,
+ stats->lastused, true);
+ preempt_enable();
+
+ a->used_hw_stats = stats->used_hw_stats;
+ a->used_hw_stats_valid = stats->used_hw_stats_valid;
}
#endif
}
@@ -353,6 +353,18 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
return TC_ACT_OK;
}
+static inline int
+tcf_exts_exec_ex(struct sk_buff *skb, struct tcf_exts *exts, int act_index,
+ struct tcf_result *res)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return tcf_action_exec(skb, exts->actions + act_index,
+ exts->nr_actions - act_index, res);
+#else
+ return TC_ACT_OK;
+#endif
+}
+
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
struct tcf_exts *exts, u32 flags,
@@ -577,6 +589,7 @@ int tc_setup_offload_action(struct flow_action *flow_action,
void tc_cleanup_offload_action(struct flow_action *flow_action);
int tc_setup_action(struct flow_action *flow_action,
struct tc_action *actions[],
+ u32 miss_cookie_base,
struct netlink_ext_ack *extack);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
@@ -770,6 +783,7 @@ struct tc_cls_matchall_offload {
enum tc_matchall_command command;
struct flow_rule *rule;
struct flow_stats stats;
+ bool use_act_stats;
unsigned long cookie;
};
@@ -788,16 +802,6 @@ struct tc_cls_bpf_offload {
bool exts_integrated;
};
-struct tc_mqprio_qopt_offload {
- /* struct tc_mqprio_qopt must always be the first element */
- struct tc_mqprio_qopt qopt;
- u16 mode;
- u16 shaper;
- u32 flags;
- u64 min_rate[TC_QOPT_MAX_QUEUE];
- u64 max_rate[TC_QOPT_MAX_QUEUE];
-};
-
/* This structure holds cookie structure that is passed from user
* to the kernel for actions and classifiers
*/
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 38207873eda6..2016839991a4 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -160,8 +160,28 @@ struct tc_etf_qopt_offload {
s32 queue;
};
+struct tc_mqprio_caps {
+ bool validate_queue_counts:1;
+};
+
+struct tc_mqprio_qopt_offload {
+ /* struct tc_mqprio_qopt must always be the first element */
+ struct tc_mqprio_qopt qopt;
+ u16 mode;
+ u16 shaper;
+ u32 flags;
+ u64 min_rate[TC_QOPT_MAX_QUEUE];
+ u64 max_rate[TC_QOPT_MAX_QUEUE];
+};
+
struct tc_taprio_caps {
bool supports_queue_max_sdu:1;
+ bool gate_mask_per_txq:1;
+ /* Device expects lower TXQ numbers to have higher priority over higher
+ * TXQs, regardless of their TC mapping. DO NOT USE FOR NEW DRIVERS,
+ * INSTEAD ENFORCE A PROPER TC:TXQ MAPPING COMING FROM USER SPACE.
+ */
+ bool broken_mqprio:1;
};
struct tc_taprio_sched_entry {
@@ -173,6 +193,7 @@ struct tc_taprio_sched_entry {
};
struct tc_taprio_qopt_offload {
+ struct tc_mqprio_qopt_offload mqprio;
u8 enable;
ktime_t base_time;
u64 cycle_time;
diff --git a/include/net/raw.h b/include/net/raw.h
index 5e665934ebc7..2c004c20ed99 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -15,6 +15,8 @@
#include <net/inet_sock.h>
#include <net/protocol.h>
+#include <net/netns/hash.h>
+#include <linux/hash.h>
#include <linux/icmp.h>
extern struct proto raw_prot;
@@ -29,13 +31,20 @@ int raw_local_deliver(struct sk_buff *, int);
int raw_rcv(struct sock *, struct sk_buff *);
-#define RAW_HTABLE_SIZE MAX_INET_PROTOS
+#define RAW_HTABLE_LOG 8
+#define RAW_HTABLE_SIZE (1U << RAW_HTABLE_LOG)
struct raw_hashinfo {
spinlock_t lock;
- struct hlist_nulls_head ht[RAW_HTABLE_SIZE];
+
+ struct hlist_nulls_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
};
+static inline u32 raw_hashfunc(const struct net *net, u32 proto)
+{
+ return hash_32(net_hash_mix(net) ^ proto, RAW_HTABLE_LOG);
+}
+
static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
{
int i;
diff --git a/include/net/route.h b/include/net/route.h
index 6e92dd5bcd61..fe00b0a2e475 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -35,9 +35,6 @@
#include <linux/cache.h>
#include <linux/security.h>
-/* IPv4 datagram length is stored into 16bit field (tot_len) */
-#define IP_MAX_MTU 0xFFFFU
-
#define RTO_ONLINK 0x01
#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index af4aa66aaa4e..fab5ba3e61b7 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -369,6 +369,8 @@ struct tcf_proto_ops {
struct nlattr **tca,
struct netlink_ext_ack *extack);
void (*tmplt_destroy)(void *tmplt_priv);
+ struct tcf_exts * (*get_exts)(const struct tcf_proto *tp,
+ u32 handle);
/* rtnetlink specific */
int (*dump)(struct net*, struct tcf_proto*, void *,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index afa3781e3ca2..e1f6e7fc2b11 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1412,6 +1412,7 @@ struct sctp_stream_priorities {
/* The next stream in line */
struct sctp_stream_out_ext *next;
__u16 prio;
+ __u16 users;
};
struct sctp_stream_out_ext {
diff --git a/include/net/smc.h b/include/net/smc.h
index c926d3313e05..597cb9381182 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include "linux/ism.h"
struct sock;
@@ -48,20 +49,14 @@ struct smcd_dmb {
#define ISM_ERROR 0xFFFF
-struct smcd_event {
- u32 type;
- u32 code;
- u64 tok;
- u64 time;
- u64 info;
-};
-
struct smcd_dev;
+struct ism_client;
struct smcd_ops {
int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
u32 vid);
- int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb,
+ struct ism_client *client);
int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
@@ -73,14 +68,14 @@ struct smcd_ops {
bool sf, unsigned int offset, void *data,
unsigned int size);
u8* (*get_system_eid)(void);
+ u64 (*get_local_gid)(struct smcd_dev *dev);
u16 (*get_chid)(struct smcd_dev *dev);
+ struct device* (*get_dev)(struct smcd_dev *dev);
};
struct smcd_dev {
const struct smcd_ops *ops;
- struct device dev;
void *priv;
- u64 local_gid;
struct list_head list;
spinlock_t lock;
struct smc_connection **conn;
@@ -95,11 +90,4 @@ struct smcd_dev {
u8 going_away : 1;
};
-struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
- const struct smcd_ops *ops, int max_dmbs);
-int smcd_register_dev(struct smcd_dev *smcd);
-void smcd_unregister_dev(struct smcd_dev *smcd);
-void smcd_free_dev(struct smcd_dev *smcd);
-void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
-void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit, u16 dmbemask);
#endif /* _SMC_H */
diff --git a/include/net/sock.h b/include/net/sock.h
index 556209727633..573f2bf7e0de 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1349,9 +1349,6 @@ struct proto {
char name[32];
struct list_head node;
-#ifdef SOCK_REFCNT_DEBUG
- atomic_t socks;
-#endif
int (*diag_destroy)(struct sock *sk, int err);
} __randomize_layout;
@@ -1359,31 +1356,6 @@ int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
int sock_load_diag_module(int family, int protocol);
-#ifdef SOCK_REFCNT_DEBUG
-static inline void sk_refcnt_debug_inc(struct sock *sk)
-{
- atomic_inc(&sk->sk_prot->socks);
-}
-
-static inline void sk_refcnt_debug_dec(struct sock *sk)
-{
- atomic_dec(&sk->sk_prot->socks);
- printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
- sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
-}
-
-static inline void sk_refcnt_debug_release(const struct sock *sk)
-{
- if (refcount_read(&sk->sk_refcnt) != 1)
- printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
- sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
-}
-#else /* SOCK_REFCNT_DEBUG */
-#define sk_refcnt_debug_inc(sk) do { } while (0)
-#define sk_refcnt_debug_dec(sk) do { } while (0)
-#define sk_refcnt_debug_release(sk) do { } while (0)
-#endif /* SOCK_REFCNT_DEBUG */
-
INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
static inline int sk_forward_alloc_get(const struct sock *sk)
@@ -1956,7 +1928,12 @@ void sk_common_release(struct sock *sk);
* Default socket callbacks and setup code
*/
-/* Initialise core socket variables */
+/* Initialise core socket variables using an explicit uid. */
+void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
+
+/* Initialise core socket variables.
+ * Assumes struct socket *sock is embedded in a struct socket_alloc.
+ */
void sock_init_data(struct socket *sock, struct sock *sk);
/*
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
index 1f4cb477bb5d..e8dd77a96748 100644
--- a/include/net/tc_act/tc_connmark.h
+++ b/include/net/tc_act/tc_connmark.h
@@ -4,10 +4,15 @@
#include <net/act_api.h>
-struct tcf_connmark_info {
- struct tc_action common;
+struct tcf_connmark_parms {
struct net *net;
u16 zone;
+ struct rcu_head rcu;
+};
+
+struct tcf_connmark_info {
+ struct tc_action common;
+ struct tcf_connmark_parms __rcu *parms;
};
#define to_connmark(a) ((struct tcf_connmark_info *)a)
diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h
index c14407160812..c869274ac529 100644
--- a/include/net/tc_act/tc_nat.h
+++ b/include/net/tc_act/tc_nat.h
@@ -5,13 +5,17 @@
#include <linux/types.h>
#include <net/act_api.h>
-struct tcf_nat {
- struct tc_action common;
-
+struct tcf_nat_parms {
__be32 old_addr;
__be32 new_addr;
__be32 mask;
u32 flags;
+ struct rcu_head rcu;
+};
+
+struct tcf_nat {
+ struct tc_action common;
+ struct tcf_nat_parms __rcu *parms;
};
#define to_tcf_nat(a) ((struct tcf_nat *)a)
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 3e02709a1df6..83fe39931781 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -4,22 +4,29 @@
#include <net/act_api.h>
#include <linux/tc_act/tc_pedit.h>
+#include <linux/types.h>
struct tcf_pedit_key_ex {
enum pedit_header_type htype;
enum pedit_cmd cmd;
};
-struct tcf_pedit {
- struct tc_action common;
- unsigned char tcfp_nkeys;
- unsigned char tcfp_flags;
- u32 tcfp_off_max_hint;
+struct tcf_pedit_parms {
struct tc_pedit_key *tcfp_keys;
struct tcf_pedit_key_ex *tcfp_keys_ex;
+ u32 tcfp_off_max_hint;
+ unsigned char tcfp_nkeys;
+ unsigned char tcfp_flags;
+ struct rcu_head rcu;
+};
+
+struct tcf_pedit {
+ struct tc_action common;
+ struct tcf_pedit_parms __rcu *parms;
};
#define to_pedit(a) ((struct tcf_pedit *)a)
+#define to_pedit_parms(a) (rcu_dereference(to_pedit(a)->parms))
static inline bool is_tcf_pedit(const struct tc_action *a)
{
@@ -32,37 +39,81 @@ static inline bool is_tcf_pedit(const struct tc_action *a)
static inline int tcf_pedit_nkeys(const struct tc_action *a)
{
- return to_pedit(a)->tcfp_nkeys;
+ struct tcf_pedit_parms *parms;
+ int nkeys;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ nkeys = parms->tcfp_nkeys;
+ rcu_read_unlock();
+
+ return nkeys;
}
static inline u32 tcf_pedit_htype(const struct tc_action *a, int index)
{
- if (to_pedit(a)->tcfp_keys_ex)
- return to_pedit(a)->tcfp_keys_ex[index].htype;
+ u32 htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ struct tcf_pedit_parms *parms;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ if (parms->tcfp_keys_ex)
+ htype = parms->tcfp_keys_ex[index].htype;
+ rcu_read_unlock();
- return TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ return htype;
}
static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index)
{
- if (to_pedit(a)->tcfp_keys_ex)
- return to_pedit(a)->tcfp_keys_ex[index].cmd;
+ struct tcf_pedit_parms *parms;
+ u32 cmd = __PEDIT_CMD_MAX;
- return __PEDIT_CMD_MAX;
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ if (parms->tcfp_keys_ex)
+ cmd = parms->tcfp_keys_ex[index].cmd;
+ rcu_read_unlock();
+
+ return cmd;
}
static inline u32 tcf_pedit_mask(const struct tc_action *a, int index)
{
- return to_pedit(a)->tcfp_keys[index].mask;
+ struct tcf_pedit_parms *parms;
+ u32 mask;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ mask = parms->tcfp_keys[index].mask;
+ rcu_read_unlock();
+
+ return mask;
}
static inline u32 tcf_pedit_val(const struct tc_action *a, int index)
{
- return to_pedit(a)->tcfp_keys[index].val;
+ struct tcf_pedit_parms *parms;
+ u32 val;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ val = parms->tcfp_keys[index].val;
+ rcu_read_unlock();
+
+ return val;
}
static inline u32 tcf_pedit_offset(const struct tc_action *a, int index)
{
- return to_pedit(a)->tcfp_keys[index].off;
+ struct tcf_pedit_parms *parms;
+ u32 off;
+
+ rcu_read_lock();
+ parms = to_pedit_parms(a);
+ off = parms->tcfp_keys[index].off;
+ rcu_read_unlock();
+
+ return off;
}
#endif /* __NET_TC_PED_H */
diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
index d323fffb839a..a6d481b5bcbc 100644
--- a/include/net/tc_wrapper.h
+++ b/include/net/tc_wrapper.h
@@ -152,9 +152,6 @@ TC_INDIRECT_FILTER_DECLARE(flow_classify);
TC_INDIRECT_FILTER_DECLARE(fw_classify);
TC_INDIRECT_FILTER_DECLARE(mall_classify);
TC_INDIRECT_FILTER_DECLARE(route4_classify);
-TC_INDIRECT_FILTER_DECLARE(rsvp_classify);
-TC_INDIRECT_FILTER_DECLARE(rsvp6_classify);
-TC_INDIRECT_FILTER_DECLARE(tcindex_classify);
TC_INDIRECT_FILTER_DECLARE(u32_classify);
static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -199,18 +196,6 @@ static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
if (tp->classify == route4_classify)
return route4_classify(skb, tp, res);
#endif
-#if IS_BUILTIN(CONFIG_NET_CLS_RSVP)
- if (tp->classify == rsvp_classify)
- return rsvp_classify(skb, tp, res);
-#endif
-#if IS_BUILTIN(CONFIG_NET_CLS_RSVP6)
- if (tp->classify == rsvp6_classify)
- return rsvp6_classify(skb, tp, res);
-#endif
-#if IS_BUILTIN(CONFIG_NET_CLS_TCINDEX)
- if (tp->classify == tcindex_classify)
- return tcindex_classify(skb, tp, res);
-#endif
skip:
return tp->classify(skb, tp, res);
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 55dbc68bfffc..d517bfac937b 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -7,6 +7,7 @@
#define __LINUX_NET_XDP_H__
#include <linux/skbuff.h> /* skb_shared_info */
+#include <uapi/linux/netdev.h>
/**
* DOC: XDP RX-queue information
@@ -43,6 +44,8 @@ enum xdp_mem_type {
MEM_TYPE_MAX,
};
+typedef u32 xdp_features_t;
+
/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH (1U << 0) /* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK XDP_XMIT_FLUSH
@@ -409,4 +412,37 @@ void xdp_attachment_setup(struct xdp_attachment_info *info,
#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE
+#define XDP_METADATA_KFUNC_xxx \
+ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \
+ bpf_xdp_metadata_rx_timestamp) \
+ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \
+ bpf_xdp_metadata_rx_hash) \
+
+enum {
+#define XDP_METADATA_KFUNC(name, _) name,
+XDP_METADATA_KFUNC_xxx
+#undef XDP_METADATA_KFUNC
+MAX_XDP_METADATA_KFUNC,
+};
+
+#ifdef CONFIG_NET
+u32 bpf_xdp_metadata_kfunc_id(int id);
+bool bpf_dev_bound_kfunc_id(u32 btf_id);
+void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
+void xdp_features_clear_redirect_target(struct net_device *dev);
+#else
+static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
+static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }
+
+static inline void
+xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
+{
+}
+
+static inline void
+xdp_features_clear_redirect_target(struct net_device *dev)
+{
+}
+#endif
+
#endif /* __LINUX_NET_XDP_H__ */
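For reference, the XDP_METADATA_KFUNC_xxx X-macro above expands the anonymous enum to roughly:

enum {
	XDP_METADATA_KFUNC_RX_TIMESTAMP,
	XDP_METADATA_KFUNC_RX_HASH,
	MAX_XDP_METADATA_KFUNC,
};
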
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index f787c3f524b0..3e952e569418 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -19,8 +19,11 @@ struct xdp_sock;
struct device;
struct page;
+#define XSK_PRIV_MAX 24
+
struct xdp_buff_xsk {
struct xdp_buff xdp;
+ u8 cb[XSK_PRIV_MAX];
dma_addr_t dma;
dma_addr_t frame_dma;
struct xsk_buff_pool *pool;
@@ -28,6 +31,8 @@ struct xdp_buff_xsk {
struct list_head free_list_node;
};
+#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
+
struct xsk_dma_map {
dma_addr_t *dma_pages;
struct device *dev;
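The cb[XSK_PRIV_MAX] area plus XSK_CHECK_PRIV_TYPE() let a driver keep per-buffer state behind the xdp_buff it recycles. A minimal sketch; the mydrv_* names and the extra field are hypothetical:

struct mydrv_xdp_buff {
	struct xdp_buff xdp;	/* must stay first */
	u64 rx_timestamp;	/* hypothetical driver-private field */
};

static void mydrv_assert_priv_fits(void)
{
	/* Compile-time check: private fields must fit in xdp_buff_xsk::cb[]. */
	XSK_CHECK_PRIV_TYPE(struct mydrv_xdp_buff);
}
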
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index e930bec33b31..b46353fc53bf 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -414,7 +414,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
gfp_t gfp_mask,
void (*callback)(int status, struct sa_path_rec *resp,
- int num_prs, void *context),
+ unsigned int num_prs, void *context),
void *context, struct ib_sa_query **query);
struct ib_sa_multicast {
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 92a673cd9b4f..95896472a82b 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -25,7 +25,6 @@ struct ib_umem {
u32 writable : 1;
u32 is_odp : 1;
u32 is_dmabuf : 1;
- struct work_struct work;
struct sg_append_table sgt_append;
};
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index a9a429172c0a..949cf4ffc536 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1168,7 +1168,7 @@ enum ib_qp_create_flags {
*/
struct ib_qp_init_attr {
- /* Consumer's event_handler callback must not block */
+ /* This callback occurs in workqueue context */
void (*event_handler)(struct ib_event *, void *);
void *qp_context;
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index cdc7cafab572..8a8ab2f793ab 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -49,7 +49,6 @@ struct rdma_addr {
struct rdma_dev_addr dev_addr;
};
-#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
struct rdma_route {
struct rdma_addr addr;
struct sa_path_rec *path_rec;
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 79d109c47242..8b7c46daeb07 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -162,8 +162,8 @@ struct rdma_restrack_entry *rdma_restrack_get_byid(struct ib_device *dev,
* rdma_restrack_no_track() - don't add resource to the DB
* @res: resource entry
*
- * Every user of thie API should be cross examined.
- * Probaby you don't need to use this function.
+ * Every user of this API should be cross examined.
+ * Probably you don't need to use this function.
*/
static inline void rdma_restrack_no_track(struct rdma_restrack_entry *res)
{
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 1aee3d0ebbb2..159823e0afbf 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -735,7 +735,6 @@ void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *);
void sas_discover_event(struct asd_sas_port *, enum discover_event ev);
-int sas_discover_sata(struct domain_device *);
int sas_discover_end_dev(struct domain_device *);
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *);
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index 9c927d46f136..2f8c719840a6 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -36,8 +36,15 @@ void sas_ata_device_link_abort(struct domain_device *dev, bool force_reset);
int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
int force_phy_id);
int smp_ata_check_ready_type(struct ata_link *link);
+int sas_discover_sata(struct domain_device *dev);
+int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
+ struct domain_device *child, int phy_id);
#else
+static inline void sas_ata_disabled_notice(void)
+{
+ pr_notice_once("ATA device seen but CONFIG_SCSI_SAS_ATA=N\n");
+}
static inline int dev_is_sata(struct domain_device *dev)
{
@@ -103,6 +110,19 @@ static inline int smp_ata_check_ready_type(struct ata_link *link)
{
return 0;
}
+
+static inline int sas_discover_sata(struct domain_device *dev)
+{
+ sas_ata_disabled_notice();
+ return -ENXIO;
+}
+
+static inline int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
+ struct domain_device *child, int phy_id)
+{
+ sas_ata_disabled_notice();
+ return -ENODEV;
+}
#endif
#endif /* _SAS_ATA_H_ */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 3642b8e3928b..7e95ec45138f 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -455,29 +455,21 @@ extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
-extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
- int data_direction, void *buffer, unsigned bufflen,
- unsigned char *sense, struct scsi_sense_hdr *sshdr,
- int timeout, int retries, blk_opf_t flags,
- req_flags_t rq_flags, int *resid);
-/* Make sure any sense buffer is the correct size. */
-#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, \
- sshdr, timeout, retries, flags, rq_flags, resid) \
-({ \
- BUILD_BUG_ON((sense) != NULL && \
- sizeof(sense) != SCSI_SENSE_BUFFERSIZE); \
- __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, \
- sense, sshdr, timeout, retries, flags, rq_flags, \
- resid); \
-})
-static inline int scsi_execute_req(struct scsi_device *sdev,
- const unsigned char *cmd, int data_direction, void *buffer,
- unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
- int retries, int *resid)
-{
- return scsi_execute(sdev, cmd, data_direction, buffer,
- bufflen, NULL, sshdr, timeout, retries, 0, 0, resid);
-}
+
+/* Optional arguments to scsi_execute_cmd */
+struct scsi_exec_args {
+ unsigned char *sense; /* sense buffer */
+ unsigned int sense_len; /* sense buffer len */
+ struct scsi_sense_hdr *sshdr; /* decoded sense header */
+ blk_mq_req_flags_t req_flags; /* BLK_MQ_REQ flags */
+ int *resid; /* residual length */
+};
+
+int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
+ blk_opf_t opf, void *buffer, unsigned int bufflen,
+ int timeout, int retries,
+ const struct scsi_exec_args *args);
+
extern void sdev_disable_disk_events(struct scsi_device *sdev);
extern void sdev_enable_disk_events(struct scsi_device *sdev);
extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t);
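Callers of the removed scsi_execute()/scsi_execute_req() move to scsi_execute_cmd() plus an optional scsi_exec_args bundle. A minimal conversion sketch (TEST UNIT READY with sense decoding; the timeout and retry counts are illustrative):

static int my_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args args = {
		.sshdr = &sshdr,
	};

	/* REQ_OP_DRV_IN with a zero-length buffer: no data phase. */
	return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
				30 * HZ, 3, &args);
}
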
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index df62be80a193..2080879e4134 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -362,6 +362,29 @@ enum ocelot_reg {
SYS_COUNT_RX_GREEN_PRIO_5,
SYS_COUNT_RX_GREEN_PRIO_6,
SYS_COUNT_RX_GREEN_PRIO_7,
+ SYS_COUNT_RX_ASSEMBLY_ERRS,
+ SYS_COUNT_RX_SMD_ERRS,
+ SYS_COUNT_RX_ASSEMBLY_OK,
+ SYS_COUNT_RX_MERGE_FRAGMENTS,
+ SYS_COUNT_RX_PMAC_OCTETS,
+ SYS_COUNT_RX_PMAC_UNICAST,
+ SYS_COUNT_RX_PMAC_MULTICAST,
+ SYS_COUNT_RX_PMAC_BROADCAST,
+ SYS_COUNT_RX_PMAC_SHORTS,
+ SYS_COUNT_RX_PMAC_FRAGMENTS,
+ SYS_COUNT_RX_PMAC_JABBERS,
+ SYS_COUNT_RX_PMAC_CRC_ALIGN_ERRS,
+ SYS_COUNT_RX_PMAC_SYM_ERRS,
+ SYS_COUNT_RX_PMAC_64,
+ SYS_COUNT_RX_PMAC_65_127,
+ SYS_COUNT_RX_PMAC_128_255,
+ SYS_COUNT_RX_PMAC_256_511,
+ SYS_COUNT_RX_PMAC_512_1023,
+ SYS_COUNT_RX_PMAC_1024_1526,
+ SYS_COUNT_RX_PMAC_1527_MAX,
+ SYS_COUNT_RX_PMAC_PAUSE,
+ SYS_COUNT_RX_PMAC_CONTROL,
+ SYS_COUNT_RX_PMAC_LONGS,
SYS_COUNT_TX_OCTETS,
SYS_COUNT_TX_UNICAST,
SYS_COUNT_TX_MULTICAST,
@@ -393,6 +416,20 @@ enum ocelot_reg {
SYS_COUNT_TX_GREEN_PRIO_6,
SYS_COUNT_TX_GREEN_PRIO_7,
SYS_COUNT_TX_AGED,
+ SYS_COUNT_TX_MM_HOLD,
+ SYS_COUNT_TX_MERGE_FRAGMENTS,
+ SYS_COUNT_TX_PMAC_OCTETS,
+ SYS_COUNT_TX_PMAC_UNICAST,
+ SYS_COUNT_TX_PMAC_MULTICAST,
+ SYS_COUNT_TX_PMAC_BROADCAST,
+ SYS_COUNT_TX_PMAC_PAUSE,
+ SYS_COUNT_TX_PMAC_64,
+ SYS_COUNT_TX_PMAC_65_127,
+ SYS_COUNT_TX_PMAC_128_255,
+ SYS_COUNT_TX_PMAC_256_511,
+ SYS_COUNT_TX_PMAC_512_1023,
+ SYS_COUNT_TX_PMAC_1024_1526,
+ SYS_COUNT_TX_PMAC_1527_MAX,
SYS_COUNT_DROP_LOCAL,
SYS_COUNT_DROP_TAIL,
SYS_COUNT_DROP_YELLOW_PRIO_0,
@@ -478,6 +515,9 @@ enum ocelot_reg {
DEV_MAC_FC_MAC_LOW_CFG,
DEV_MAC_FC_MAC_HIGH_CFG,
DEV_MAC_STICKY,
+ DEV_MM_ENABLE_CONFIG,
+ DEV_MM_VERIF_CONFIG,
+ DEV_MM_STATUS,
PCS1G_CFG,
PCS1G_MODE_CFG,
PCS1G_SD_CFG,
@@ -702,6 +742,12 @@ struct ocelot_mirror {
int to;
};
+struct ocelot_mm_state {
+ struct mutex lock;
+ enum ethtool_mm_verify_status verify_status;
+ bool tx_active;
+};
+
struct ocelot_port;
struct ocelot_port {
@@ -814,6 +860,7 @@ struct ocelot {
struct workqueue_struct *owq;
u8 ptp:1;
+ u8 mm_supported:1;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
struct hwtstamp_config hwtstamp_config;
@@ -826,6 +873,8 @@ struct ocelot {
spinlock_t ptp_clock_lock;
struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM];
+ struct ocelot_mm_state *mm;
+
struct ocelot_fdma *fdma;
};
@@ -918,6 +967,7 @@ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields);
struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res);
+int ocelot_reset(struct ocelot *ocelot);
int ocelot_init(struct ocelot *ocelot);
void ocelot_deinit(struct ocelot *ocelot);
void ocelot_init_port(struct ocelot *ocelot, int port);
@@ -929,6 +979,11 @@ void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, int cpu);
void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port);
u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port);
+/* Watermark interface */
+u16 ocelot_wm_enc(u16 value);
+u16 ocelot_wm_dec(u16 wm);
+void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse);
+
/* DSA callbacks */
void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data);
void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data);
@@ -937,6 +992,8 @@ void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
struct rtnl_link_stats64 *stats);
void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
struct ethtool_pause_stats *pause_stats);
+void ocelot_port_get_mm_stats(struct ocelot *ocelot, int port,
+ struct ethtool_mm_stats *stats);
void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges);
@@ -1082,6 +1139,13 @@ int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
struct ocelot_policer *pol);
int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix);
+void ocelot_port_mm_irq(struct ocelot *ocelot, int port);
+int ocelot_port_set_mm(struct ocelot *ocelot, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+int ocelot_port_get_mm(struct ocelot *ocelot, int port,
+ struct ethtool_mm_state *state);
+
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
int ocelot_mrp_add(struct ocelot *ocelot, int port,
const struct switchdev_obj_mrp *mrp);
diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h
index 0c6021f02fee..fcf02baa76b2 100644
--- a/include/soc/mscc/ocelot_dev.h
+++ b/include/soc/mscc/ocelot_dev.h
@@ -93,6 +93,29 @@
#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
+#define DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA BIT(0)
+#define DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA BIT(4)
+#define DEV_MM_CONFIG_ENABLE_CONFIG_KEEP_S_AFTER_D BIT(8)
+
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS BIT(0)
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(x) (((x) << 4) & GENMASK(11, 4))
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M GENMASK(11, 4)
+#define DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(x) (((x) & GENMASK(11, 4)) >> 4)
+#define DEV_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS(x) (((x) << 12) & GENMASK(13, 12))
+#define DEV_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_M GENMASK(13, 12)
+#define DEV_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_X(x) (((x) & GENMASK(13, 12)) >> 12)
+
+#define DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STATUS BIT(0)
+#define DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STICKY BIT(4)
+#define DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE(x) (((x) << 8) & GENMASK(10, 8))
+#define DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE_M GENMASK(10, 8)
+#define DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY BIT(12)
+#define DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY BIT(16)
+#define DEV_MM_STAT_MM_STATUS_MM_RX_FRAME_STATUS BIT(20)
+#define DEV_MM_STAT_MM_STATUS_MM_TX_FRAME_STATUS BIT(24)
+#define DEV_MM_STAT_MM_STATUS_MM_TX_PRMPT_STATUS BIT(28)
+
#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
#define PCS1G_CFG_PCS_ENA BIT(0)
diff --git a/include/soc/mscc/vsc7514_regs.h b/include/soc/mscc/vsc7514_regs.h
index ceee26c96959..ffe343a9c04b 100644
--- a/include/soc/mscc/vsc7514_regs.h
+++ b/include/soc/mscc/vsc7514_regs.h
@@ -10,20 +10,10 @@
#include <soc/mscc/ocelot_vcap.h>
-extern const u32 vsc7514_ana_regmap[];
-extern const u32 vsc7514_qs_regmap[];
-extern const u32 vsc7514_qsys_regmap[];
-extern const u32 vsc7514_rew_regmap[];
-extern const u32 vsc7514_sys_regmap[];
-extern const u32 vsc7514_vcap_regmap[];
-extern const u32 vsc7514_ptp_regmap[];
-extern const u32 vsc7514_dev_gmii_regmap[];
+extern struct vcap_props vsc7514_vcap_props[];
-extern const struct vcap_field vsc7514_vcap_es0_keys[];
-extern const struct vcap_field vsc7514_vcap_es0_actions[];
-extern const struct vcap_field vsc7514_vcap_is1_keys[];
-extern const struct vcap_field vsc7514_vcap_is1_actions[];
-extern const struct vcap_field vsc7514_vcap_is2_keys[];
-extern const struct vcap_field vsc7514_vcap_is2_actions[];
+extern const struct reg_field vsc7514_regfields[REGFIELD_MAX];
+
+extern const u32 *vsc7514_regmap[TARGET_MAX];
#endif
diff --git a/include/sound/ac97/codec.h b/include/sound/ac97/codec.h
index 9792d25fa369..2fc641cb1982 100644
--- a/include/sound/ac97/codec.h
+++ b/include/sound/ac97/codec.h
@@ -63,7 +63,7 @@ struct ac97_codec_device {
struct ac97_codec_driver {
struct device_driver driver;
int (*probe)(struct ac97_codec_device *);
- int (*remove)(struct ac97_codec_device *);
+ void (*remove)(struct ac97_codec_device *dev);
void (*shutdown)(struct ac97_codec_device *);
const struct ac97_id *id_table;
};
diff --git a/include/sound/acp63_chip_offset_byte.h b/include/sound/acp63_chip_offset_byte.h
index c9260e1640ae..d746c1cb0163 100644
--- a/include/sound/acp63_chip_offset_byte.h
+++ b/include/sound/acp63_chip_offset_byte.h
@@ -126,24 +126,24 @@
#define ACP_PAD_PULLDOWN_CTRL 0x0001448
#define ACP_PAD_DRIVE_STRENGTH_CTRL 0x000144C
#define ACP_PAD_SCHMEN_CTRL 0x0001450
-#define ACP_SW_PAD_KEEPER_EN 0x0001454
-#define ACP_SW_WAKE_EN 0x0001458
+#define ACP_SW0_PAD_KEEPER_EN 0x0001454
+#define ACP_SW0_WAKE_EN 0x0001458
#define ACP_I2S_WAKE_EN 0x000145C
#define ACP_SW1_WAKE_EN 0x0001460
-#define ACP_SW_I2S_ERROR_REASON 0x00018B4
-#define ACP_SW_POS_TRACK_I2S_TX_CTRL 0x00018B8
-#define ACP_SW_I2S_TX_DMA_POS 0x00018BC
-#define ACP_SW_POS_TRACK_BT_TX_CTRL 0x00018C0
-#define ACP_SW_BT_TX_DMA_POS 0x00018C4
-#define ACP_SW_POS_TRACK_HS_TX_CTRL 0x00018C8
-#define ACP_SW_HS_TX_DMA_POS 0x00018CC
-#define ACP_SW_POS_TRACK_I2S_RX_CTRL 0x00018D0
-#define ACP_SW_I2S_RX_DMA_POS 0x00018D4
-#define ACP_SW_POS_TRACK_BT_RX_CTRL 0x00018D8
-#define ACP_SW_BT_RX_DMA_POS 0x00018DC
-#define ACP_SW_POS_TRACK_HS_RX_CTRL 0x00018E0
-#define ACP_SW_HS_RX_DMA_POS 0x00018E4
+#define ACP_SW0_I2S_ERROR_REASON 0x00018B4
+#define ACP_SW0_POS_TRACK_AUDIO0_TX_CTRL 0x00018B8
+#define ACP_SW0_AUDIO0_TX_DMA_POS 0x00018BC
+#define ACP_SW0_POS_TRACK_AUDIO1_TX_CTRL 0x00018C0
+#define ACP_SW0_AUDIO1_TX_DMA_POS 0x00018C4
+#define ACP_SW0_POS_TRACK_AUDIO2_TX_CTRL 0x00018C8
+#define ACP_SW0_AUDIO2_TX_DMA_POS 0x00018CC
+#define ACP_SW0_POS_TRACK_AUDIO0_RX_CTRL 0x00018D0
+#define ACP_SW0_AUDIO0_DMA_POS 0x00018D4
+#define ACP_SW0_POS_TRACK_AUDIO1_RX_CTRL 0x00018D8
+#define ACP_SW0_AUDIO1_RX_DMA_POS 0x00018DC
+#define ACP_SW0_POS_TRACK_AUDIO2_RX_CTRL 0x00018E0
+#define ACP_SW0_AUDIO2_RX_DMA_POS 0x00018E4
#define ACP_ERROR_INTR_MASK1 0X0001974
#define ACP_ERROR_INTR_MASK2 0X0001978
#define ACP_ERROR_INTR_MASK3 0X000197C
@@ -155,98 +155,80 @@
#define ACP_EXTERNAL_INTR_STAT 0x0001A0C
#define ACP_EXTERNAL_INTR_STAT1 0x0001A10
#define ACP_ERROR_STATUS 0x0001A4C
-#define ACP_P1_SW_I2S_ERROR_REASON 0x0001A50
-#define ACP_P1_SW_POS_TRACK_I2S_TX_CTRL 0x0001A6C
-#define ACP_P1_SW_I2S_TX_DMA_POS 0x0001A70
-#define ACP_P1_SW_POS_TRACK_I2S_RX_CTRL 0x0001A74
-#define ACP_P1_SW_I2S_RX_DMA_POS 0x0001A78
+#define ACP_SW1_I2S_ERROR_REASON 0x0001A50
+#define ACP_SW1_POS_TRACK_AUDIO0_TX_CTRL 0x0001A6C
+#define ACP_SW1_AUDIO0_TX_DMA_POS 0x0001A70
+#define ACP_SW1_POS_TRACK_AUDIO0_RX_CTRL 0x0001A74
+#define ACP_SW1_AUDIO0_RX_DMA_POS 0x0001A78
#define ACP_P1_DMIC_I2S_GPIO_INTR_CTRL 0x0001A7C
#define ACP_P1_DMIC_I2S_GPIO_INTR_STATUS 0x0001A80
#define ACP_SCRATCH_REG_BASE_ADDR 0x0001A84
-#define ACP_P1_SW_POS_TRACK_BT_TX_CTRL 0x0001A88
-#define ACP_P1_SW_BT_TX_DMA_POS 0x0001A8C
-#define ACP_P1_SW_POS_TRACK_HS_TX_CTRL 0x0001A90
-#define ACP_P1_SW_HS_TX_DMA_POS 0x0001A94
-#define ACP_P1_SW_POS_TRACK_BT_RX_CTRL 0x0001A98
-#define ACP_P1_SW_BT_RX_DMA_POS 0x0001A9C
-#define ACP_P1_SW_POS_TRACK_HS_RX_CTRL 0x0001AA0
-#define ACP_P1_SW_HS_RX_DMA_POS 0x0001AA4
+#define ACP_SW1_POS_TRACK_AUDIO1_TX_CTRL 0x0001A88
+#define ACP_SW1_AUDIO1_TX_DMA_POS 0x0001A8C
+#define ACP_SW1_POS_TRACK_AUDIO2_TX_CTRL 0x0001A90
+#define ACP_SW1_AUDIO2_TX_DMA_POS 0x0001A94
+#define ACP_SW1_POS_TRACK_AUDIO1_RX_CTRL 0x0001A98
+#define ACP_SW1_AUDIO1_RX_DMA_POS 0x0001A9C
+#define ACP_SW1_POS_TRACK_AUDIO2_RX_CTRL 0x0001AA0
+#define ACP_SW1_AUDIO2_RX_DMA_POS 0x0001AA4
#define ACP_ERROR_INTR_MASK4 0X0001AEC
#define ACP_ERROR_INTR_MASK5 0X0001AF0
/* Registers from ACP_AUDIO_BUFFERS block */
-#define ACP_I2S_RX_RINGBUFADDR 0x0002000
-#define ACP_I2S_RX_RINGBUFSIZE 0x0002004
-#define ACP_I2S_RX_LINKPOSITIONCNTR 0x0002008
-#define ACP_I2S_RX_FIFOADDR 0x000200C
-#define ACP_I2S_RX_FIFOSIZE 0x0002010
-#define ACP_I2S_RX_DMA_SIZE 0x0002014
-#define ACP_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x0002018
-#define ACP_I2S_RX_LINEARPOSITIONCNTR_LOW 0x000201C
-#define ACP_I2S_RX_INTR_WATERMARK_SIZE 0x0002020
-#define ACP_I2S_TX_RINGBUFADDR 0x0002024
-#define ACP_I2S_TX_RINGBUFSIZE 0x0002028
-#define ACP_I2S_TX_LINKPOSITIONCNTR 0x000202C
-#define ACP_I2S_TX_FIFOADDR 0x0002030
-#define ACP_I2S_TX_FIFOSIZE 0x0002034
-#define ACP_I2S_TX_DMA_SIZE 0x0002038
-#define ACP_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x000203C
-#define ACP_I2S_TX_LINEARPOSITIONCNTR_LOW 0x0002040
-#define ACP_I2S_TX_INTR_WATERMARK_SIZE 0x0002044
-#define ACP_BT_RX_RINGBUFADDR 0x0002048
-#define ACP_BT_RX_RINGBUFSIZE 0x000204C
-#define ACP_BT_RX_LINKPOSITIONCNTR 0x0002050
-#define ACP_BT_RX_FIFOADDR 0x0002054
-#define ACP_BT_RX_FIFOSIZE 0x0002058
-#define ACP_BT_RX_DMA_SIZE 0x000205C
-#define ACP_BT_RX_LINEARPOSITIONCNTR_HIGH 0x0002060
-#define ACP_BT_RX_LINEARPOSITIONCNTR_LOW 0x0002064
-#define ACP_BT_RX_INTR_WATERMARK_SIZE 0x0002068
-#define ACP_BT_TX_RINGBUFADDR 0x000206C
-#define ACP_BT_TX_RINGBUFSIZE 0x0002070
-#define ACP_BT_TX_LINKPOSITIONCNTR 0x0002074
-#define ACP_BT_TX_FIFOADDR 0x0002078
-#define ACP_BT_TX_FIFOSIZE 0x000207C
-#define ACP_BT_TX_DMA_SIZE 0x0002080
-#define ACP_BT_TX_LINEARPOSITIONCNTR_HIGH 0x0002084
-#define ACP_BT_TX_LINEARPOSITIONCNTR_LOW 0x0002088
-#define ACP_BT_TX_INTR_WATERMARK_SIZE 0x000208C
-#define ACP_HS_RX_RINGBUFADDR 0x0002090
-#define ACP_HS_RX_RINGBUFSIZE 0x0002094
-#define ACP_HS_RX_LINKPOSITIONCNTR 0x0002098
-#define ACP_HS_RX_FIFOADDR 0x000209C
-#define ACP_HS_RX_FIFOSIZE 0x00020A0
-#define ACP_HS_RX_DMA_SIZE 0x00020A4
-#define ACP_HS_RX_LINEARPOSITIONCNTR_HIGH 0x00020A8
-#define ACP_HS_RX_LINEARPOSITIONCNTR_LOW 0x00020AC
-#define ACP_HS_RX_INTR_WATERMARK_SIZE 0x00020B0
-#define ACP_HS_TX_RINGBUFADDR 0x00020B4
-#define ACP_HS_TX_RINGBUFSIZE 0x00020B8
-#define ACP_HS_TX_LINKPOSITIONCNTR 0x00020BC
-#define ACP_HS_TX_FIFOADDR 0x00020C0
-#define ACP_HS_TX_FIFOSIZE 0x00020C4
-#define ACP_HS_TX_DMA_SIZE 0x00020C8
-#define ACP_HS_TX_LINEARPOSITIONCNTR_HIGH 0x00020CC
-#define ACP_HS_TX_LINEARPOSITIONCNTR_LOW 0x00020D0
-#define ACP_HS_TX_INTR_WATERMARK_SIZE 0x00020D4
-#define ACP_AUDIO_RX_RINGBUFADDR ACP_I2S_RX_RINGBUFADDR
-#define ACP_AUDIO_RX_RINGBUFSIZE ACP_I2S_RX_RINGBUFSIZE
-#define ACP_AUDIO_RX_LINKPOSITIONCNTR ACP_I2S_RX_LINKPOSITIONCNTR
-#define ACP_AUDIO_RX_FIFOADDR ACP_I2S_RX_FIFOADDR
-#define ACP_AUDIO_RX_FIFOSIZE ACP_I2S_RX_FIFOSIZE
-#define ACP_AUDIO_RX_DMA_SIZE ACP_I2S_RX_DMA_SIZE
-#define ACP_AUDIO_RX_LINEARPOSITIONCNTR_HIGH ACP_I2S_RX_LINEARPOSITIONCNTR_HIGH
-#define ACP_AUDIO_RX_LINEARPOSITIONCNTR_LOW ACP_I2S_RX_LINEARPOSITIONCNTR_LOW
-#define ACP_AUDIO_RX_INTR_WATERMARK_SIZE ACP_I2S_RX_INTR_WATERMARK_SIZE
-#define ACP_AUDIO_TX_RINGBUFADDR ACP_I2S_TX_RINGBUFADDR
-#define ACP_AUDIO_TX_RINGBUFSIZE ACP_I2S_TX_RINGBUFSIZE
-#define ACP_AUDIO_TX_LINKPOSITIONCNTR ACP_I2S_TX_LINKPOSITIONCNTR
-#define ACP_AUDIO_TX_FIFOADDR ACP_I2S_TX_FIFOADDR
-#define ACP_AUDIO_TX_FIFOSIZE ACP_I2S_TX_FIFOSIZE
-#define ACP_AUDIO_TX_DMA_SIZE ACP_I2S_TX_DMA_SIZE
-#define ACP_AUDIO_TX_LINEARPOSITIONCNTR_HIGH ACP_I2S_TX_LINEARPOSITIONCNTR_HIGH
-#define ACP_AUDIO_TX_LINEARPOSITIONCNTR_LOW ACP_I2S_TX_LINEARPOSITIONCNTR_LOW
-#define ACP_AUDIO_TX_INTR_WATERMARK_SIZE ACP_I2S_TX_INTR_WATERMARK_SIZE
+#define ACP_AUDIO0_RX_RINGBUFADDR 0x0002000
+#define ACP_AUDIO0_RX_RINGBUFSIZE 0x0002004
+#define ACP_AUDIO0_RX_LINKPOSITIONCNTR 0x0002008
+#define ACP_AUDIO0_RX_FIFOADDR 0x000200C
+#define ACP_AUDIO0_RX_FIFOSIZE 0x0002010
+#define ACP_AUDIO0_RX_DMA_SIZE 0x0002014
+#define ACP_AUDIO0_RX_LINEARPOSITIONCNTR_HIGH 0x0002018
+#define ACP_AUDIO0_RX_LINEARPOSITIONCNTR_LOW 0x000201C
+#define ACP_AUDIO0_RX_INTR_WATERMARK_SIZE 0x0002020
+#define ACP_AUDIO0_TX_RINGBUFADDR 0x0002024
+#define ACP_AUDIO0_TX_RINGBUFSIZE 0x0002028
+#define ACP_AUDIO0_TX_LINKPOSITIONCNTR 0x000202C
+#define ACP_AUDIO0_TX_FIFOADDR 0x0002030
+#define ACP_AUDIO0_TX_FIFOSIZE 0x0002034
+#define ACP_AUDIO0_TX_DMA_SIZE 0x0002038
+#define ACP_AUDIO0_TX_LINEARPOSITIONCNTR_HIGH 0x000203C
+#define ACP_AUDIO0_TX_LINEARPOSITIONCNTR_LOW 0x0002040
+#define ACP_AUDIO0_TX_INTR_WATERMARK_SIZE 0x0002044
+#define ACP_AUDIO1_RX_RINGBUFADDR 0x0002048
+#define ACP_AUDIO1_RX_RINGBUFSIZE 0x000204C
+#define ACP_AUDIO1_RX_LINKPOSITIONCNTR 0x0002050
+#define ACP_AUDIO1_RX_FIFOADDR 0x0002054
+#define ACP_AUDIO1_RX_FIFOSIZE 0x0002058
+#define ACP_AUDIO1_RX_DMA_SIZE 0x000205C
+#define ACP_AUDIO1_RX_LINEARPOSITIONCNTR_HIGH 0x0002060
+#define ACP_AUDIO1_RX_LINEARPOSITIONCNTR_LOW 0x0002064
+#define ACP_AUDIO1_RX_INTR_WATERMARK_SIZE 0x0002068
+#define ACP_AUDIO1_TX_RINGBUFADDR 0x000206C
+#define ACP_AUDIO1_TX_RINGBUFSIZE 0x0002070
+#define ACP_AUDIO1_TX_LINKPOSITIONCNTR 0x0002074
+#define ACP_AUDIO1_TX_FIFOADDR 0x0002078
+#define ACP_AUDIO1_TX_FIFOSIZE 0x000207C
+#define ACP_AUDIO1_TX_DMA_SIZE 0x0002080
+#define ACP_AUDIO1_TX_LINEARPOSITIONCNTR_HIGH 0x0002084
+#define ACP_AUDIO1_TX_LINEARPOSITIONCNTR_LOW 0x0002088
+#define ACP_AUDIO1_TX_INTR_WATERMARK_SIZE 0x000208C
+#define ACP_AUDIO2_RX_RINGBUFADDR 0x0002090
+#define ACP_AUDIO2_RX_RINGBUFSIZE 0x0002094
+#define ACP_AUDIO2_RX_LINKPOSITIONCNTR 0x0002098
+#define ACP_AUDIO2_RX_FIFOADDR 0x000209C
+#define ACP_AUDIO2_RX_FIFOSIZE 0x00020A0
+#define ACP_AUDIO2_RX_DMA_SIZE 0x00020A4
+#define ACP_AUDIO2_RX_LINEARPOSITIONCNTR_HIGH 0x00020A8
+#define ACP_AUDIO2_RX_LINEARPOSITIONCNTR_LOW 0x00020AC
+#define ACP_AUDIO2_RX_INTR_WATERMARK_SIZE 0x00020B0
+#define ACP_AUDIO2_TX_RINGBUFADDR 0x00020B4
+#define ACP_AUDIO2_TX_RINGBUFSIZE 0x00020B8
+#define ACP_AUDIO2_TX_LINKPOSITIONCNTR 0x00020BC
+#define ACP_AUDIO2_TX_FIFOADDR 0x00020C0
+#define ACP_AUDIO2_TX_FIFOSIZE 0x00020C4
+#define ACP_AUDIO2_TX_DMA_SIZE 0x00020C8
+#define ACP_AUDIO2_TX_LINEARPOSITIONCNTR_HIGH 0x00020CC
+#define ACP_AUDIO2_TX_LINEARPOSITIONCNTR_LOW 0x00020D0
+#define ACP_AUDIO2_TX_INTR_WATERMARK_SIZE 0x00020D4
/* Registers from ACP_I2S_TDM block */
#define ACP_I2STDM_IER 0x0002400
@@ -292,367 +274,222 @@
#define ACP_WOV_ERROR_STATUS_REGISTER 0x0002C68
#define ACP_PDM_CLKDIV 0x0002C6C
-/* Registers from ACP_SW_SWCLK block */
-#define ACP_SW_EN 0x0003000
-#define ACP_SW_EN_STATUS 0x0003004
-#define ACP_SW_FRAMESIZE 0x0003008
-#define ACP_SW_SSP_COUNTER 0x000300C
-#define ACP_SW_AUDIO_TX_EN 0x0003010
-#define ACP_SW_AUDIO_TX_EN_STATUS 0x0003014
-#define ACP_SW_AUDIO_TX_FRAME_FORMAT 0x0003018
-#define ACP_SW_AUDIO_TX_SAMPLEINTERVAL 0x000301C
-#define ACP_SW_AUDIO_TX_HCTRL_DP0 0x0003020
-#define ACP_SW_AUDIO_TX_HCTRL_DP1 0x0003024
-#define ACP_SW_AUDIO_TX_HCTRL_DP2 0x0003028
-#define ACP_SW_AUDIO_TX_HCTRL_DP3 0x000302C
-#define ACP_SW_AUDIO_TX_OFFSET_DP0 0x0003030
-#define ACP_SW_AUDIO_TX_OFFSET_DP1 0x0003034
-#define ACP_SW_AUDIO_TX_OFFSET_DP2 0x0003038
-#define ACP_SW_AUDIO_TX_OFFSET_DP3 0x000303C
-#define ACP_SW_AUDIO_TX_CHANNEL_ENABLE_DP0 0x0003040
-#define ACP_SW_AUDIO_TX_CHANNEL_ENABLE_DP1 0x0003044
-#define ACP_SW_AUDIO_TX_CHANNEL_ENABLE_DP2 0x0003048
-#define ACP_SW_AUDIO_TX_CHANNEL_ENABLE_DP3 0x000304C
-#define ACP_SW_BT_TX_EN 0x0003050
-#define ACP_SW_BT_TX_EN_STATUS 0x0003054
-#define ACP_SW_BT_TX_FRAME_FORMAT 0x0003058
-#define ACP_SW_BT_TX_SAMPLEINTERVAL 0x000305C
-#define ACP_SW_BT_TX_HCTRL 0x0003060
-#define ACP_SW_BT_TX_OFFSET 0x0003064
-#define ACP_SW_BT_TX_CHANNEL_ENABLE_DP0 0x0003068
-#define ACP_SW_HEADSET_TX_EN 0x000306C
-#define ACP_SW_HEADSET_TX_EN_STATUS 0x0003070
-#define ACP_SW_HEADSET_TX_FRAME_FORMAT 0x0003074
-#define ACP_SW_HEADSET_TX_SAMPLEINTERVAL 0x0003078
-#define ACP_SW_HEADSET_TX_HCTRL 0x000307C
-#define ACP_SW_HEADSET_TX_OFFSET 0x0003080
-#define ACP_SW_HEADSET_TX_CHANNEL_ENABLE_DP0 0x0003084
-#define ACP_SW_AUDIO_RX_EN 0x0003088
-#define ACP_SW_AUDIO_RX_EN_STATUS 0x000308C
-#define ACP_SW_AUDIO_RX_FRAME_FORMAT 0x0003090
-#define ACP_SW_AUDIO_RX_SAMPLEINTERVAL 0x0003094
-#define ACP_SW_AUDIO_RX_HCTRL_DP0 0x0003098
-#define ACP_SW_AUDIO_RX_HCTRL_DP1 0x000309C
-#define ACP_SW_AUDIO_RX_HCTRL_DP2 0x0003100
-#define ACP_SW_AUDIO_RX_HCTRL_DP3 0x0003104
-#define ACP_SW_AUDIO_RX_OFFSET_DP0 0x0003108
-#define ACP_SW_AUDIO_RX_OFFSET_DP1 0x000310C
-#define ACP_SW_AUDIO_RX_OFFSET_DP2 0x0003110
-#define ACP_SW_AUDIO_RX_OFFSET_DP3 0x0003114
-#define ACP_SW_AUDIO_RX_CHANNEL_ENABLE_DP0 0x0003118
-#define ACP_SW_AUDIO_RX_CHANNEL_ENABLE_DP1 0x000311C
-#define ACP_SW_AUDIO_RX_CHANNEL_ENABLE_DP2 0x0003120
-#define ACP_SW_AUDIO_RX_CHANNEL_ENABLE_DP3 0x0003124
-#define ACP_SW_BT_RX_EN 0x0003128
-#define ACP_SW_BT_RX_EN_STATUS 0x000312C
-#define ACP_SW_BT_RX_FRAME_FORMAT 0x0003130
-#define ACP_SW_BT_RX_SAMPLEINTERVAL 0x0003134
-#define ACP_SW_BT_RX_HCTRL 0x0003138
-#define ACP_SW_BT_RX_OFFSET 0x000313C
-#define ACP_SW_BT_RX_CHANNEL_ENABLE_DP0 0x0003140
-#define ACP_SW_HEADSET_RX_EN 0x0003144
-#define ACP_SW_HEADSET_RX_EN_STATUS 0x0003148
-#define ACP_SW_HEADSET_RX_FRAME_FORMAT 0x000314C
-#define ACP_SW_HEADSET_RX_SAMPLEINTERVAL 0x0003150
-#define ACP_SW_HEADSET_RX_HCTRL 0x0003154
-#define ACP_SW_HEADSET_RX_OFFSET 0x0003158
-#define ACP_SW_HEADSET_RX_CHANNEL_ENABLE_DP0 0x000315C
-#define ACP_SW_BPT_PORT_EN 0x0003160
-#define ACP_SW_BPT_PORT_EN_STATUS 0x0003164
-#define ACP_SW_BPT_PORT_FRAME_FORMAT 0x0003168
-#define ACP_SW_BPT_PORT_SAMPLEINTERVAL 0x000316C
-#define ACP_SW_BPT_PORT_HCTRL 0x0003170
-#define ACP_SW_BPT_PORT_OFFSET 0x0003174
-#define ACP_SW_BPT_PORT_CHANNEL_ENABLE 0x0003178
-#define ACP_SW_BPT_PORT_FIRST_BYTE_ADDR 0x000317C
-#define ACP_SW_CLK_RESUME_CTRL 0x0003180
-#define ACP_SW_CLK_RESUME_DELAY_CNTR 0x0003184
-#define ACP_SW_BUS_RESET_CTRL 0x0003188
-#define ACP_SW_PRBS_ERR_STATUS 0x000318C
-#define SW_IMM_CMD_UPPER_WORD 0x0003230
-#define SW_IMM_CMD_LOWER_QWORD 0x0003234
-#define SW_IMM_RESP_UPPER_WORD 0x0003238
-#define SW_IMM_RESP_LOWER_QWORD 0x000323C
-#define SW_IMM_CMD_STS 0x0003240
-#define SW_BRA_BASE_ADDRESS 0x0003244
-#define SW_BRA_TRANSFER_SIZE 0x0003248
-#define SW_BRA_DMA_BUSY 0x000324C
-#define SW_BRA_RESP 0x0003250
-#define SW_BRA_RESP_FRAME_ADDR 0x0003254
-#define SW_BRA_CURRENT_TRANSFER_SIZE 0x0003258
-#define SW_STATE_CHANGE_STATUS_0TO7 0x000325C
-#define SW_STATE_CHANGE_STATUS_8TO11 0x0003260
-#define SW_STATE_CHANGE_STATUS_MASK_0TO7 0x0003264
-#define SW_STATE_CHANGE_STATUS_MASK_8TO11 0x0003268
-#define SW_CLK_FREQUENCY_CTRL 0x000326C
-#define SW_ERROR_INTR_MASK 0x0003270
-#define SW_PHY_TEST_MODE_DATA_OFF 0x0003274
+/* Registers from ACP_SW0_SWCLK block */
+#define ACP_SW0_EN 0x0003000
+#define ACP_SW0_EN_STATUS 0x0003004
+#define ACP_SW0_FRAMESIZE 0x0003008
+#define ACP_SW0_SSP_COUNTER 0x000300C
+#define ACP_SW0_AUDIO0_TX_EN 0x0003010
+#define ACP_SW0_AUDIO0_TX_EN_STATUS 0x0003014
+#define ACP_SW0_AUDIO0_TX_FRAME_FORMAT 0x0003018
+#define ACP_SW0_AUDIO0_TX_SAMPLEINTERVAL 0x000301C
+#define ACP_SW0_AUDIO0_TX_HCTRL_DP0 0x0003020
+#define ACP_SW0_AUDIO0_TX_HCTRL_DP1 0x0003024
+#define ACP_SW0_AUDIO0_TX_HCTRL_DP2 0x0003028
+#define ACP_SW0_AUDIO0_TX_HCTRL_DP3 0x000302C
+#define ACP_SW0_AUDIO0_TX_OFFSET_DP0 0x0003030
+#define ACP_SW0_AUDIO0_TX_OFFSET_DP1 0x0003034
+#define ACP_SW0_AUDIO0_TX_OFFSET_DP2 0x0003038
+#define ACP_SW0_AUDIO0_TX_OFFSET_DP3 0x000303C
+#define ACP_SW0_AUDIO0_TX_CHANNEL_ENABLE_DP0 0x0003040
+#define ACP_SW0_AUDIO0_TX_CHANNEL_ENABLE_DP1 0x0003044
+#define ACP_SW0_AUDIO0_TX_CHANNEL_ENABLE_DP2 0x0003048
+#define ACP_SW0_AUDIO0_TX_CHANNEL_ENABLE_DP3 0x000304C
+#define ACP_SW0_AUDIO1_TX_EN 0x0003050
+#define ACP_SW0_AUDIO1_TX_EN_STATUS 0x0003054
+#define ACP_SW0_AUDIO1_TX_FRAME_FORMAT 0x0003058
+#define ACP_SW0_AUDIO1_TX_SAMPLEINTERVAL 0x000305C
+#define ACP_SW0_AUDIO1_TX_HCTRL 0x0003060
+#define ACP_SW0_AUDIO1_TX_OFFSET 0x0003064
+#define ACP_SW0_AUDIO1_TX_CHANNEL_ENABLE_DP0 0x0003068
+#define ACP_SW0_AUDIO2_TX_EN 0x000306C
+#define ACP_SW0_AUDIO2_TX_EN_STATUS 0x0003070
+#define ACP_SW0_AUDIO2_TX_FRAME_FORMAT 0x0003074
+#define ACP_SW0_AUDIO2_TX_SAMPLEINTERVAL 0x0003078
+#define ACP_SW0_AUDIO2_TX_HCTRL 0x000307C
+#define ACP_SW0_AUDIO2_TX_OFFSET 0x0003080
+#define ACP_SW0_AUDIO2_TX_CHANNEL_ENABLE_DP0 0x0003084
+#define ACP_SW0_AUDIO0_RX_EN 0x0003088
+#define ACP_SW0_AUDIO0_RX_EN_STATUS 0x000308C
+#define ACP_SW0_AUDIO0_RX_FRAME_FORMAT 0x0003090
+#define ACP_SW0_AUDIO0_RX_SAMPLEINTERVAL 0x0003094
+#define ACP_SW0_AUDIO0_RX_HCTRL_DP0 0x0003098
+#define ACP_SW0_AUDIO0_RX_HCTRL_DP1 0x000309C
+#define ACP_SW0_AUDIO0_RX_HCTRL_DP2 0x0003100
+#define ACP_SW0_AUDIO0_RX_HCTRL_DP3 0x0003104
+#define ACP_SW0_AUDIO0_RX_OFFSET_DP0 0x0003108
+#define ACP_SW0_AUDIO0_RX_OFFSET_DP1 0x000310C
+#define ACP_SW0_AUDIO0_RX_OFFSET_DP2 0x0003110
+#define ACP_SW0_AUDIO0_RX_OFFSET_DP3 0x0003114
+#define ACP_SW0_AUDIO0_RX_CHANNEL_ENABLE_DP0 0x0003118
+#define ACP_SW0_AUDIO0_RX_CHANNEL_ENABLE_DP1 0x000311C
+#define ACP_SW0_AUDIO0_RX_CHANNEL_ENABLE_DP2 0x0003120
+#define ACP_SW0_AUDIO0_RX_CHANNEL_ENABLE_DP3 0x0003124
+#define ACP_SW0_AUDIO1_RX_EN 0x0003128
+#define ACP_SW0_AUDIO1_RX_EN_STATUS 0x000312C
+#define ACP_SW0_AUDIO1_RX_FRAME_FORMAT 0x0003130
+#define ACP_SW0_AUDIO1_RX_SAMPLEINTERVAL 0x0003134
+#define ACP_SW0_AUDIO1_RX_HCTRL 0x0003138
+#define ACP_SW0_AUDIO1_RX_OFFSET 0x000313C
+#define ACP_SW0_AUDIO1_RX_CHANNEL_ENABLE_DP0 0x0003140
+#define ACP_SW0_AUDIO2_RX_EN 0x0003144
+#define ACP_SW0_AUDIO2_RX_EN_STATUS 0x0003148
+#define ACP_SW0_AUDIO2_RX_FRAME_FORMAT 0x000314C
+#define ACP_SW0_AUDIO2_RX_SAMPLEINTERVAL 0x0003150
+#define ACP_SW0_AUDIO2_RX_HCTRL 0x0003154
+#define ACP_SW0_AUDIO2_RX_OFFSET 0x0003158
+#define ACP_SW0_AUDIO2_RX_CHANNEL_ENABLE_DP0 0x000315C
+#define ACP_SW0_BPT_PORT_EN 0x0003160
+#define ACP_SW0_BPT_PORT_EN_STATUS 0x0003164
+#define ACP_SW0_BPT_PORT_FRAME_FORMAT 0x0003168
+#define ACP_SW0_BPT_PORT_SAMPLEINTERVAL 0x000316C
+#define ACP_SW0_BPT_PORT_HCTRL 0x0003170
+#define ACP_SW0_BPT_PORT_OFFSET 0x0003174
+#define ACP_SW0_BPT_PORT_CHANNEL_ENABLE 0x0003178
+#define ACP_SW0_BPT_PORT_FIRST_BYTE_ADDR 0x000317C
+#define ACP_SW0_CLK_RESUME_CTRL 0x0003180
+#define ACP_SW0_CLK_RESUME_DELAY_CNTR 0x0003184
+#define ACP_SW0_BUS_RESET_CTRL 0x0003188
+#define ACP_SW0_PRBS_ERR_STATUS 0x000318C
+#define ACP_SW0_IMM_CMD_UPPER_WORD 0x0003230
+#define ACP_SW0_IMM_CMD_LOWER_QWORD 0x0003234
+#define ACP_SW0_IMM_RESP_UPPER_WORD 0x0003238
+#define ACP_SW0_IMM_RESP_LOWER_QWORD 0x000323C
+#define ACP_SW0_IMM_CMD_STS 0x0003240
+#define ACP_SW0_BRA_BASE_ADDRESS 0x0003244
+#define ACP_SW0_BRA_TRANSFER_SIZE 0x0003248
+#define ACP_SW0_BRA_DMA_BUSY 0x000324C
+#define ACP_SW0_BRA_RESP 0x0003250
+#define ACP_SW0_BRA_RESP_FRAME_ADDR 0x0003254
+#define ACP_SW0_BRA_CURRENT_TRANSFER_SIZE 0x0003258
+#define ACP_SW0_STATECHANGE_STATUS_0TO7 0x000325C
+#define ACP_SW0_STATECHANGE_STATUS_8TO11 0x0003260
+#define ACP_SW0_STATECHANGE_STATUS_MASK_0TO7 0x0003264
+#define ACP_SW0_STATECHANGE_STATUS_MASK_8TO11 0x0003268
+#define ACP_SW0_CLK_FREQUENCY_CTRL 0x000326C
+#define ACP_SW0_ERROR_INTR_MASK 0x0003270
+#define ACP_SW0_PHY_TEST_MODE_DATA_OFF 0x0003274
/* Registers from ACP_P1_AUDIO_BUFFERS block */
-#define ACP_P1_I2S_RX_RINGBUFADDR 0x0003A00
-#define ACP_P1_I2S_RX_RINGBUFSIZE 0x0003A04
-#define ACP_P1_I2S_RX_LINKPOSITIONCNTR 0x0003A08
-#define ACP_P1_I2S_RX_FIFOADDR 0x0003A0C
-#define ACP_P1_I2S_RX_FIFOSIZE 0x0003A10
-#define ACP_P1_I2S_RX_DMA_SIZE 0x0003A14
-#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x0003A18
-#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_LOW 0x0003A1C
-#define ACP_P1_I2S_RX_INTR_WATERMARK_SIZE 0x0003A20
-#define ACP_P1_I2S_TX_RINGBUFADDR 0x0003A24
-#define ACP_P1_I2S_TX_RINGBUFSIZE 0x0003A28
-#define ACP_P1_I2S_TX_LINKPOSITIONCNTR 0x0003A2C
-#define ACP_P1_I2S_TX_FIFOADDR 0x0003A30
-#define ACP_P1_I2S_TX_FIFOSIZE 0x0003A34
-#define ACP_P1_I2S_TX_DMA_SIZE 0x0003A38
-#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x0003A3C
-#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_LOW 0x0003A40
-#define ACP_P1_I2S_TX_INTR_WATERMARK_SIZE 0x0003A44
-#define ACP_P1_BT_RX_RINGBUFADDR 0x0003A48
-#define ACP_P1_BT_RX_RINGBUFSIZE 0x0003A4C
-#define ACP_P1_BT_RX_LINKPOSITIONCNTR 0x0003A50
-#define ACP_P1_BT_RX_FIFOADDR 0x0003A54
-#define ACP_P1_BT_RX_FIFOSIZE 0x0003A58
-#define ACP_P1_BT_RX_DMA_SIZE 0x0003A5C
-#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_HIGH 0x0003A60
-#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_LOW 0x0003A64
-#define ACP_P1_BT_RX_INTR_WATERMARK_SIZE 0x0003A68
-#define ACP_P1_BT_TX_RINGBUFADDR 0x0003A6C
-#define ACP_P1_BT_TX_RINGBUFSIZE 0x0003A70
-#define ACP_P1_BT_TX_LINKPOSITIONCNTR 0x0003A74
-#define ACP_P1_BT_TX_FIFOADDR 0x0003A78
-#define ACP_P1_BT_TX_FIFOSIZE 0x0003A7C
-#define ACP_P1_BT_TX_DMA_SIZE 0x0003A80
-#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_HIGH 0x0003A84
-#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_LOW 0x0003A88
-#define ACP_P1_BT_TX_INTR_WATERMARK_SIZE 0x0003A8C
-#define ACP_P1_HS_RX_RINGBUFADDR 0x0003A90
-#define ACP_P1_HS_RX_RINGBUFSIZE 0x0003A94
-#define ACP_P1_HS_RX_LINKPOSITIONCNTR 0x0003A98
-#define ACP_P1_HS_RX_FIFOADDR 0x0003A9C
-#define ACP_P1_HS_RX_FIFOSIZE 0x0003AA0
-#define ACP_P1_HS_RX_DMA_SIZE 0x0003AA4
-#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_HIGH 0x0003AA8
-#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_LOW 0x0003AAC
-#define ACP_P1_HS_RX_INTR_WATERMARK_SIZE 0x0003AB0
-#define ACP_P1_HS_TX_RINGBUFADDR 0x0003AB4
-#define ACP_P1_HS_TX_RINGBUFSIZE 0x0003AB8
-#define ACP_P1_HS_TX_LINKPOSITIONCNTR 0x0003ABC
-#define ACP_P1_HS_TX_FIFOADDR 0x0003AC0
-#define ACP_P1_HS_TX_FIFOSIZE 0x0003AC4
-#define ACP_P1_HS_TX_DMA_SIZE 0x0003AC8
-#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_HIGH 0x0003ACC
-#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_LOW 0x0003AD0
-#define ACP_P1_HS_TX_INTR_WATERMARK_SIZE 0x0003AD4
-#define ACP_P1_AUDIO_RX_RINGBUFADDR ACP_P1_I2S_RX_RINGBUFADDR
-#define ACP_P1_AUDIO_RX_RINGBUFSIZE ACP_P1_I2S_RX_RINGBUFSIZE
-#define ACP_P1_AUDIO_RX_LINKPOSITIONCNTR ACP_P1_I2S_RX_LINKPOSITIONCNTR
-#define ACP_P1_AUDIO_RX_FIFOADDR ACP_P1_I2S_RX_FIFOADDR
-#define ACP_P1_AUDIO_RX_FIFOSIZE ACP_P1_I2S_RX_FIFOSIZE
-#define ACP_P1_AUDIO_RX_DMA_SIZE ACP_P1_I2S_RX_DMA_SIZE
-#define ACP_P1_AUDIO_RX_LINEARPOSITIONCNTR_HIGH ACP_P1_I2S_RX_LINEARPOSITIONCNTR_HIGH
-#define ACP_P1_AUDIO_RX_LINEARPOSITIONCNTR_LOW ACP_P1_I2S_RX_LINEARPOSITIONCNTR_LOW
-#define ACP_P1_AUDIO_RX_INTR_WATERMARK_SIZE ACP_P1_I2S_RX_INTR_WATERMARK_SIZE
-#define ACP_P1_AUDIO_TX_RINGBUFADDR ACP_P1_I2S_TX_RINGBUFADDR
-#define ACP_P1_AUDIO_TX_RINGBUFSIZE ACP_P1_I2S_TX_RINGBUFSIZE
-#define ACP_P1_AUDIO_TX_LINKPOSITIONCNTR ACP_P1_I2S_TX_LINKPOSITIONCNTR
-#define ACP_P1_AUDIO_TX_FIFOADDR ACP_P1_I2S_TX_FIFOADDR
-#define ACP_P1_AUDIO_TX_FIFOSIZE ACP_P1_I2S_TX_FIFOSIZE
-#define ACP_P1_AUDIO_TX_DMA_SIZE ACP_P1_I2S_TX_DMA_SIZE
-#define ACP_P1_AUDIO_TX_LINEARPOSITIONCNTR_HIGH ACP_P1_I2S_TX_LINEARPOSITIONCNTR_HIGH
-#define ACP_P1_AUDIO_TX_LINEARPOSITIONCNTR_LOW ACP_P1_I2S_TX_LINEARPOSITIONCNTR_LOW
-#define ACP_P1_AUDIO_TX_INTR_WATERMARK_SIZE ACP_P1_I2S_TX_INTR_WATERMARK_SIZE
+#define ACP_P1_AUDIO0_RX_RINGBUFADDR 0x0003A00
+#define ACP_P1_AUDIO0_RX_RINGBUFSIZE 0x0003A04
+#define ACP_P1_AUDIO0_RX_LINKPOSITIONCNTR 0x0003A08
+#define ACP_P1_AUDIO0_RX_FIFOADDR 0x0003A0C
+#define ACP_P1_AUDIO0_RX_FIFOSIZE 0x0003A10
+#define ACP_P1_AUDIO0_RX_DMA_SIZE 0x0003A14
+#define ACP_P1_AUDIO0_RX_LINEARPOSITIONCNTR_HIGH 0x0003A18
+#define ACP_P1_AUDIO0_RX_LINEARPOSITIONCNTR_LOW 0x0003A1C
+#define ACP_P1_AUDIO0_RX_INTR_WATERMARK_SIZE 0x0003A20
+#define ACP_P1_AUDIO0_TX_RINGBUFADDR 0x0003A24
+#define ACP_P1_AUDIO0_TX_RINGBUFSIZE 0x0003A28
+#define ACP_P1_AUDIO0_TX_LINKPOSITIONCNTR 0x0003A2C
+#define ACP_P1_AUDIO0_TX_FIFOADDR 0x0003A30
+#define ACP_P1_AUDIO0_TX_FIFOSIZE 0x0003A34
+#define ACP_P1_AUDIO0_TX_DMA_SIZE 0x0003A38
+#define ACP_P1_AUDIO0_TX_LINEARPOSITIONCNTR_HIGH 0x0003A3C
+#define ACP_P1_AUDIO0_TX_LINEARPOSITIONCNTR_LOW 0x0003A40
+#define ACP_P1_AUDIO0_TX_INTR_WATERMARK_SIZE 0x0003A44
+#define ACP_P1_AUDIO1_RX_RINGBUFADDR 0x0003A48
+#define ACP_P1_AUDIO1_RX_RINGBUFSIZE 0x0003A4C
+#define ACP_P1_AUDIO1_RX_LINKPOSITIONCNTR 0x0003A50
+#define ACP_P1_AUDIO1_RX_FIFOADDR 0x0003A54
+#define ACP_P1_AUDIO1_RX_FIFOSIZE 0x0003A58
+#define ACP_P1_AUDIO1_RX_DMA_SIZE 0x0003A5C
+#define ACP_P1_AUDIO1_RX_LINEARPOSITIONCNTR_HIGH 0x0003A60
+#define ACP_P1_AUDIO1_RX_LINEARPOSITIONCNTR_LOW 0x0003A64
+#define ACP_P1_AUDIO1_RX_INTR_WATERMARK_SIZE 0x0003A68
+#define ACP_P1_AUDIO1_TX_RINGBUFADDR 0x0003A6C
+#define ACP_P1_AUDIO1_TX_RINGBUFSIZE 0x0003A70
+#define ACP_P1_AUDIO1_TX_LINKPOSITIONCNTR 0x0003A74
+#define ACP_P1_AUDIO1_TX_FIFOADDR 0x0003A78
+#define ACP_P1_AUDIO1_TX_FIFOSIZE 0x0003A7C
+#define ACP_P1_AUDIO1_TX_DMA_SIZE 0x0003A80
+#define ACP_P1_AUDIO1_TX_LINEARPOSITIONCNTR_HIGH 0x0003A84
+#define ACP_P1_AUDIO1_TX_LINEARPOSITIONCNTR_LOW 0x0003A88
+#define ACP_P1_AUDIO1_TX_INTR_WATERMARK_SIZE 0x0003A8C
+#define ACP_P1_AUDIO2_RX_RINGBUFADDR 0x0003A90
+#define ACP_P1_AUDIO2_RX_RINGBUFSIZE 0x0003A94
+#define ACP_P1_AUDIO2_RX_LINKPOSITIONCNTR 0x0003A98
+#define ACP_P1_AUDIO2_RX_FIFOADDR 0x0003A9C
+#define ACP_P1_AUDIO2_RX_FIFOSIZE 0x0003AA0
+#define ACP_P1_AUDIO2_RX_DMA_SIZE 0x0003AA4
+#define ACP_P1_AUDIO2_RX_LINEARPOSITIONCNTR_HIGH 0x0003AA8
+#define ACP_P1_AUDIO2_RX_LINEARPOSITIONCNTR_LOW 0x0003AAC
+#define ACP_P1_AUDIO2_RX_INTR_WATERMARK_SIZE 0x0003AB0
+#define ACP_P1_AUDIO2_TX_RINGBUFADDR 0x0003AB4
+#define ACP_P1_AUDIO2_TX_RINGBUFSIZE 0x0003AB8
+#define ACP_P1_AUDIO2_TX_LINKPOSITIONCNTR 0x0003ABC
+#define ACP_P1_AUDIO2_TX_FIFOADDR 0x0003AC0
+#define ACP_P1_AUDIO2_TX_FIFOSIZE 0x0003AC4
+#define ACP_P1_AUDIO2_TX_DMA_SIZE 0x0003AC8
+#define ACP_P1_AUDIO2_TX_LINEARPOSITIONCNTR_HIGH 0x0003ACC
+#define ACP_P1_AUDIO2_TX_LINEARPOSITIONCNTR_LOW 0x0003AD0
+#define ACP_P1_AUDIO2_TX_INTR_WATERMARK_SIZE 0x0003AD4
-/* Registers from ACP_P1_SW_SWCLK block */
-#define ACP_P1_SW_EN 0x0003C00
-#define ACP_P1_SW_EN_STATUS 0x0003C04
-#define ACP_P1_SW_FRAMESIZE 0x0003C08
-#define ACP_P1_SW_SSP_COUNTER 0x0003C0C
-#define ACP_P1_SW_BT_TX_EN 0x0003C50
-#define ACP_P1_SW_BT_TX_EN_STATUS 0x0003C54
-#define ACP_P1_SW_BT_TX_FRAME_FORMAT 0x0003C58
-#define ACP_P1_SW_BT_TX_SAMPLEINTERVAL 0x0003C5C
-#define ACP_P1_SW_BT_TX_HCTRL 0x0003C60
-#define ACP_P1_SW_BT_TX_OFFSET 0x0003C64
-#define ACP_P1_SW_BT_TX_CHANNEL_ENABLE_DP0 0x0003C68
-#define ACP_P1_SW_BT_RX_EN 0x0003D28
-#define ACP_P1_SW_BT_RX_EN_STATUS 0x0003D2C
-#define ACP_P1_SW_BT_RX_FRAME_FORMAT 0x0003D30
-#define ACP_P1_SW_BT_RX_SAMPLEINTERVAL 0x0003D34
-#define ACP_P1_SW_BT_RX_HCTRL 0x0003D38
-#define ACP_P1_SW_BT_RX_OFFSET 0x0003D3C
-#define ACP_P1_SW_BT_RX_CHANNEL_ENABLE_DP0 0x0003D40
-#define ACP_P1_SW_BPT_PORT_EN 0x0003D60
-#define ACP_P1_SW_BPT_PORT_EN_STATUS 0x0003D64
-#define ACP_P1_SW_BPT_PORT_FRAME_FORMAT 0x0003D68
-#define ACP_P1_SW_BPT_PORT_SAMPLEINTERVAL 0x0003D6C
-#define ACP_P1_SW_BPT_PORT_HCTRL 0x0003D70
-#define ACP_P1_SW_BPT_PORT_OFFSET 0x0003D74
-#define ACP_P1_SW_BPT_PORT_CHANNEL_ENABLE 0x0003D78
-#define ACP_P1_SW_BPT_PORT_FIRST_BYTE_ADDR 0x0003D7C
-#define ACP_P1_SW_CLK_RESUME_CTRL 0x0003D80
-#define ACP_P1_SW_CLK_RESUME_DELAY_CNTR 0x0003D84
-#define ACP_P1_SW_BUS_RESET_CTRL 0x0003D88
-#define ACP_P1_SW_PRBS_ERR_STATUS 0x0003D8C
+/* Registers from ACP_SW1_SWCLK block */
+#define ACP_SW1_EN 0x0003C00
+#define ACP_SW1_EN_STATUS 0x0003C04
+#define ACP_SW1_FRAMESIZE 0x0003C08
+#define ACP_SW1_SSP_COUNTER 0x0003C0C
+#define ACP_SW1_AUDIO1_TX_EN 0x0003C50
+#define ACP_SW1_AUDIO1_TX_EN_STATUS 0x0003C54
+#define ACP_SW1_AUDIO1_TX_FRAME_FORMAT 0x0003C58
+#define ACP_SW1_AUDIO1_TX_SAMPLEINTERVAL 0x0003C5C
+#define ACP_SW1_AUDIO1_TX_HCTRL 0x0003C60
+#define ACP_SW1_AUDIO1_TX_OFFSET 0x0003C64
+#define ACP_SW1_AUDIO1_TX_CHANNEL_ENABLE_DP0 0x0003C68
+#define ACP_SW1_AUDIO1_RX_EN 0x0003D28
+#define ACP_SW1_AUDIO1_RX_EN_STATUS 0x0003D2C
+#define ACP_SW1_AUDIO1_RX_FRAME_FORMAT 0x0003D30
+#define ACP_SW1_AUDIO1_RX_SAMPLEINTERVAL 0x0003D34
+#define ACP_SW1_AUDIO1_RX_HCTRL 0x0003D38
+#define ACP_SW1_AUDIO1_RX_OFFSET 0x0003D3C
+#define ACP_SW1_AUDIO1_RX_CHANNEL_ENABLE_DP0 0x0003D40
+#define ACP_SW1_BPT_PORT_EN 0x0003D60
+#define ACP_SW1_BPT_PORT_EN_STATUS 0x0003D64
+#define ACP_SW1_BPT_PORT_FRAME_FORMAT 0x0003D68
+#define ACP_SW1_BPT_PORT_SAMPLEINTERVAL 0x0003D6C
+#define ACP_SW1_BPT_PORT_HCTRL 0x0003D70
+#define ACP_SW1_BPT_PORT_OFFSET 0x0003D74
+#define ACP_SW1_BPT_PORT_CHANNEL_ENABLE 0x0003D78
+#define ACP_SW1_BPT_PORT_FIRST_BYTE_ADDR 0x0003D7C
+#define ACP_SW1_CLK_RESUME_CTRL 0x0003D80
+#define ACP_SW1_CLK_RESUME_DELAY_CNTR 0x0003D84
+#define ACP_SW1_BUS_RESET_CTRL 0x0003D88
+#define ACP_SW1_PRBS_ERR_STATUS 0x0003D8C
-/* Registers from ACP_P1_SW_ACLK block */
-#define P1_SW_CORB_BASE_ADDRESS 0x0003E00
-#define P1_SW_CORB_WRITE_POINTER 0x0003E04
-#define P1_SW_CORB_READ_POINTER 0x0003E08
-#define P1_SW_CORB_CONTROL 0x0003E0C
-#define P1_SW_CORB_SIZE 0x0003E14
-#define P1_SW_RIRB_BASE_ADDRESS 0x0003E18
-#define P1_SW_RIRB_WRITE_POINTER 0x0003E1C
-#define P1_SW_RIRB_RESPONSE_INTERRUPT_COUNT 0x0003E20
-#define P1_SW_RIRB_CONTROL 0x0003E24
-#define P1_SW_RIRB_SIZE 0x0003E28
-#define P1_SW_RIRB_FIFO_MIN_THDL 0x0003E2C
-#define P1_SW_IMM_CMD_UPPER_WORD 0x0003E30
-#define P1_SW_IMM_CMD_LOWER_QWORD 0x0003E34
-#define P1_SW_IMM_RESP_UPPER_WORD 0x0003E38
-#define P1_SW_IMM_RESP_LOWER_QWORD 0x0003E3C
-#define P1_SW_IMM_CMD_STS 0x0003E40
-#define P1_SW_BRA_BASE_ADDRESS 0x0003E44
-#define P1_SW_BRA_TRANSFER_SIZE 0x0003E48
-#define P1_SW_BRA_DMA_BUSY 0x0003E4C
-#define P1_SW_BRA_RESP 0x0003E50
-#define P1_SW_BRA_RESP_FRAME_ADDR 0x0003E54
-#define P1_SW_BRA_CURRENT_TRANSFER_SIZE 0x0003E58
-#define P1_SW_STATE_CHANGE_STATUS_0TO7 0x0003E5C
-#define P1_SW_STATE_CHANGE_STATUS_8TO11 0x0003E60
-#define P1_SW_STATE_CHANGE_STATUS_MASK_0TO7 0x0003E64
-#define P1_SW_STATE_CHANGE_STATUS_MASK_8TO11 0x0003E68
-#define P1_SW_CLK_FREQUENCY_CTRL 0x0003E6C
-#define P1_SW_ERROR_INTR_MASK 0x0003E70
-#define P1_SW_PHY_TEST_MODE_DATA_OFF 0x0003E74
+/* Registers from ACP_SW1_ACLK block */
+#define ACP_SW1_CORB_BASE_ADDRESS 0x0003E00
+#define ACP_SW1_CORB_WRITE_POINTER 0x0003E04
+#define ACP_SW1_CORB_READ_POINTER 0x0003E08
+#define ACP_SW1_CORB_CONTROL 0x0003E0C
+#define ACP_SW1_CORB_SIZE 0x0003E14
+#define ACP_SW1_RIRB_BASE_ADDRESS 0x0003E18
+#define ACP_SW1_RIRB_WRITE_POINTER 0x0003E1C
+#define ACP_SW1_RIRB_RESPONSE_INTERRUPT_COUNT 0x0003E20
+#define ACP_SW1_RIRB_CONTROL 0x0003E24
+#define ACP_SW1_RIRB_SIZE 0x0003E28
+#define ACP_SW1_RIRB_FIFO_MIN_THDL 0x0003E2C
+#define ACP_SW1_IMM_CMD_UPPER_WORD 0x0003E30
+#define ACP_SW1_IMM_CMD_LOWER_QWORD 0x0003E34
+#define ACP_SW1_IMM_RESP_UPPER_WORD 0x0003E38
+#define ACP_SW1_IMM_RESP_LOWER_QWORD 0x0003E3C
+#define ACP_SW1_IMM_CMD_STS 0x0003E40
+#define ACP_SW1_BRA_BASE_ADDRESS 0x0003E44
+#define ACP_SW1_BRA_TRANSFER_SIZE 0x0003E48
+#define ACP_SW1_BRA_DMA_BUSY 0x0003E4C
+#define ACP_SW1_BRA_RESP 0x0003E50
+#define ACP_SW1_BRA_RESP_FRAME_ADDR 0x0003E54
+#define ACP_SW1_BRA_CURRENT_TRANSFER_SIZE 0x0003E58
+#define ACP_SW1_STATECHANGE_STATUS_0TO7 0x0003E5C
+#define ACP_SW1_STATECHANGE_STATUS_8TO11 0x0003E60
+#define ACP_SW1_STATECHANGE_STATUS_MASK_0TO7 0x0003E64
+#define ACP_SW1_STATECHANGE_STATUS_MASK_8TO11 0x0003E68
+#define ACP_SW1_CLK_FREQUENCY_CTRL 0x0003E6C
+#define ACP_SW1_ERROR_INTR_MASK 0x0003E70
+#define ACP_SW1_PHY_TEST_MODE_DATA_OFF 0x0003E74
/* Registers from ACP_SCRATCH block */
-#define ACP_SCRATCH_REG_0 0x0010000
-#define ACP_SCRATCH_REG_1 0x0010004
-#define ACP_SCRATCH_REG_2 0x0010008
-#define ACP_SCRATCH_REG_3 0x001000C
-#define ACP_SCRATCH_REG_4 0x0010010
-#define ACP_SCRATCH_REG_5 0x0010014
-#define ACP_SCRATCH_REG_6 0x0010018
-#define ACP_SCRATCH_REG_7 0x001001C
-#define ACP_SCRATCH_REG_8 0x0010020
-#define ACP_SCRATCH_REG_9 0x0010024
-#define ACP_SCRATCH_REG_10 0x0010028
-#define ACP_SCRATCH_REG_11 0x001002C
-#define ACP_SCRATCH_REG_12 0x0010030
-#define ACP_SCRATCH_REG_13 0x0010034
-#define ACP_SCRATCH_REG_14 0x0010038
-#define ACP_SCRATCH_REG_15 0x001003C
-#define ACP_SCRATCH_REG_16 0x0010040
-#define ACP_SCRATCH_REG_17 0x0010044
-#define ACP_SCRATCH_REG_18 0x0010048
-#define ACP_SCRATCH_REG_19 0x001004C
-#define ACP_SCRATCH_REG_20 0x0010050
-#define ACP_SCRATCH_REG_21 0x0010054
-#define ACP_SCRATCH_REG_22 0x0010058
-#define ACP_SCRATCH_REG_23 0x001005C
-#define ACP_SCRATCH_REG_24 0x0010060
-#define ACP_SCRATCH_REG_25 0x0010064
-#define ACP_SCRATCH_REG_26 0x0010068
-#define ACP_SCRATCH_REG_27 0x001006C
-#define ACP_SCRATCH_REG_28 0x0010070
-#define ACP_SCRATCH_REG_29 0x0010074
-#define ACP_SCRATCH_REG_30 0x0010078
-#define ACP_SCRATCH_REG_31 0x001007C
-#define ACP_SCRATCH_REG_32 0x0010080
-#define ACP_SCRATCH_REG_33 0x0010084
-#define ACP_SCRATCH_REG_34 0x0010088
-#define ACP_SCRATCH_REG_35 0x001008C
-#define ACP_SCRATCH_REG_36 0x0010090
-#define ACP_SCRATCH_REG_37 0x0010094
-#define ACP_SCRATCH_REG_38 0x0010098
-#define ACP_SCRATCH_REG_39 0x001009C
-#define ACP_SCRATCH_REG_40 0x00100A0
-#define ACP_SCRATCH_REG_41 0x00100A4
-#define ACP_SCRATCH_REG_42 0x00100A8
-#define ACP_SCRATCH_REG_43 0x00100AC
-#define ACP_SCRATCH_REG_44 0x00100B0
-#define ACP_SCRATCH_REG_45 0x00100B4
-#define ACP_SCRATCH_REG_46 0x00100B8
-#define ACP_SCRATCH_REG_47 0x00100BC
-#define ACP_SCRATCH_REG_48 0x00100C0
-#define ACP_SCRATCH_REG_49 0x00100C4
-#define ACP_SCRATCH_REG_50 0x00100C8
-#define ACP_SCRATCH_REG_51 0x00100CC
-#define ACP_SCRATCH_REG_52 0x00100D0
-#define ACP_SCRATCH_REG_53 0x00100D4
-#define ACP_SCRATCH_REG_54 0x00100D8
-#define ACP_SCRATCH_REG_55 0x00100DC
-#define ACP_SCRATCH_REG_56 0x00100E0
-#define ACP_SCRATCH_REG_57 0x00100E4
-#define ACP_SCRATCH_REG_58 0x00100E8
-#define ACP_SCRATCH_REG_59 0x00100EC
-#define ACP_SCRATCH_REG_60 0x00100F0
-#define ACP_SCRATCH_REG_61 0x00100F4
-#define ACP_SCRATCH_REG_62 0x00100F8
-#define ACP_SCRATCH_REG_63 0x00100FC
-#define ACP_SCRATCH_REG_64 0x0010100
-#define ACP_SCRATCH_REG_65 0x0010104
-#define ACP_SCRATCH_REG_66 0x0010108
-#define ACP_SCRATCH_REG_67 0x001010C
-#define ACP_SCRATCH_REG_68 0x0010110
-#define ACP_SCRATCH_REG_69 0x0010114
-#define ACP_SCRATCH_REG_70 0x0010118
-#define ACP_SCRATCH_REG_71 0x001011C
-#define ACP_SCRATCH_REG_72 0x0010120
-#define ACP_SCRATCH_REG_73 0x0010124
-#define ACP_SCRATCH_REG_74 0x0010128
-#define ACP_SCRATCH_REG_75 0x001012C
-#define ACP_SCRATCH_REG_76 0x0010130
-#define ACP_SCRATCH_REG_77 0x0010134
-#define ACP_SCRATCH_REG_78 0x0010138
-#define ACP_SCRATCH_REG_79 0x001013C
-#define ACP_SCRATCH_REG_80 0x0010140
-#define ACP_SCRATCH_REG_81 0x0010144
-#define ACP_SCRATCH_REG_82 0x0010148
-#define ACP_SCRATCH_REG_83 0x001014C
-#define ACP_SCRATCH_REG_84 0x0010150
-#define ACP_SCRATCH_REG_85 0x0010154
-#define ACP_SCRATCH_REG_86 0x0010158
-#define ACP_SCRATCH_REG_87 0x001015C
-#define ACP_SCRATCH_REG_88 0x0010160
-#define ACP_SCRATCH_REG_89 0x0010164
-#define ACP_SCRATCH_REG_90 0x0010168
-#define ACP_SCRATCH_REG_91 0x001016C
-#define ACP_SCRATCH_REG_92 0x0010170
-#define ACP_SCRATCH_REG_93 0x0010174
-#define ACP_SCRATCH_REG_94 0x0010178
-#define ACP_SCRATCH_REG_95 0x001017C
-#define ACP_SCRATCH_REG_96 0x0010180
-#define ACP_SCRATCH_REG_97 0x0010184
-#define ACP_SCRATCH_REG_98 0x0010188
-#define ACP_SCRATCH_REG_99 0x001018C
-#define ACP_SCRATCH_REG_100 0x0010190
-#define ACP_SCRATCH_REG_101 0x0010194
-#define ACP_SCRATCH_REG_102 0x0010198
-#define ACP_SCRATCH_REG_103 0x001019C
-#define ACP_SCRATCH_REG_104 0x00101A0
-#define ACP_SCRATCH_REG_105 0x00101A4
-#define ACP_SCRATCH_REG_106 0x00101A8
-#define ACP_SCRATCH_REG_107 0x00101AC
-#define ACP_SCRATCH_REG_108 0x00101B0
-#define ACP_SCRATCH_REG_109 0x00101B4
-#define ACP_SCRATCH_REG_110 0x00101B8
-#define ACP_SCRATCH_REG_111 0x00101BC
-#define ACP_SCRATCH_REG_112 0x00101C0
-#define ACP_SCRATCH_REG_113 0x00101C4
-#define ACP_SCRATCH_REG_114 0x00101C8
-#define ACP_SCRATCH_REG_115 0x00101CC
-#define ACP_SCRATCH_REG_116 0x00101D0
-#define ACP_SCRATCH_REG_117 0x00101D4
-#define ACP_SCRATCH_REG_118 0x00101D8
-#define ACP_SCRATCH_REG_119 0x00101DC
-#define ACP_SCRATCH_REG_120 0x00101E0
-#define ACP_SCRATCH_REG_121 0x00101E4
-#define ACP_SCRATCH_REG_122 0x00101E8
-#define ACP_SCRATCH_REG_123 0x00101EC
-#define ACP_SCRATCH_REG_124 0x00101F0
-#define ACP_SCRATCH_REG_125 0x00101F4
-#define ACP_SCRATCH_REG_126 0x00101F8
-#define ACP_SCRATCH_REG_127 0x00101FC
-#define ACP_SCRATCH_REG_128 0x0010200
+#define ACP_SCRATCH_REG_0 0x0010000
+
#endif
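With the rename, the three audio-buffer banks (AUDIO0/1/2) sit at a fixed 0x48-byte stride (RX at 0x2000, 0x2048, 0x2090 above), so a driver can index them generically instead of special-casing the old I2S/BT/HS names. A minimal sketch, not part of the patch; the stride macro and acp63_audio_rx_ringbufaddr() are hypothetical names layered on the definitions above:

#define ACP_AUDIO_BANK_STRIDE	0x48

/* ring-buffer address register of RX bank 0..2 (AUDIO0/1/2) */
static inline u32 acp63_audio_rx_ringbufaddr(unsigned int bank)
{
	return ACP_AUDIO0_RX_RINGBUFADDR + bank * ACP_AUDIO_BANK_STRIDE;
}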
diff --git a/include/sound/core.h b/include/sound/core.h
index 4365c35d038b..3edc4ab08774 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -286,10 +286,10 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
struct module *module, size_t extra_size,
struct snd_card **card_ret);
-int snd_card_disconnect(struct snd_card *card);
+void snd_card_disconnect(struct snd_card *card);
void snd_card_disconnect_sync(struct snd_card *card);
-int snd_card_free(struct snd_card *card);
-int snd_card_free_when_closed(struct snd_card *card);
+void snd_card_free(struct snd_card *card);
+void snd_card_free_when_closed(struct snd_card *card);
int snd_card_free_on_error(struct device *dev, int ret);
void snd_card_set_id(struct snd_card *card, const char *id);
int snd_card_register(struct snd_card *card);
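snd_card_disconnect(), snd_card_free() and snd_card_free_when_closed() now return void, so callers stop propagating a return value. A minimal sketch of an updated caller; the surrounding remove path is hypothetical, not taken from this patch:

#include <sound/core.h>

static void my_chip_remove(struct snd_card *card)
{
	/* previously: err = snd_card_free(card); the error path is gone */
	snd_card_free(card);
}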
diff --git a/include/sound/cs42l42.h b/include/sound/cs42l42.h
index 1d1c24fdd0ca..3994e933db19 100644
--- a/include/sound/cs42l42.h
+++ b/include/sound/cs42l42.h
@@ -34,6 +34,7 @@
#define CS42L42_PAGE_24 0x2400
#define CS42L42_PAGE_25 0x2500
#define CS42L42_PAGE_26 0x2600
+#define CS42L42_PAGE_27 0x2700
#define CS42L42_PAGE_28 0x2800
#define CS42L42_PAGE_29 0x2900
#define CS42L42_PAGE_2A 0x2A00
@@ -720,6 +721,10 @@
#define CS42L42_SRC_SDOUT_FS (CS42L42_PAGE_26 + 0x09)
+/* Page 0x27 DMA */
+#define CS42L42_SOFT_RESET_REBOOT (CS42L42_PAGE_27 + 0x01)
+#define CS42L42_SFT_RST_REBOOT_MASK BIT(1)
+
/* Page 0x28 S/PDIF Registers */
#define CS42L42_SPDIF_CTL1 (CS42L42_PAGE_28 + 0x01)
#define CS42L42_SPDIF_CTL2 (CS42L42_PAGE_28 + 0x02)
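The new page 0x27 register exposes a reset/reboot status bit. A hedged sketch of polling it through the codec's regmap; the bit polarity (cleared once the reboot has completed) and the helper name are assumptions:

#include <linux/regmap.h>
#include <sound/cs42l42.h>

static bool cs42l42_reboot_done(struct regmap *map)
{
	unsigned int val;

	if (regmap_read(map, CS42L42_SOFT_RESET_REBOOT, &val))
		return false;
	return !(val & CS42L42_SFT_RST_REBOOT_MASK);	/* assumed polarity */
}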
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index eba23daf2c29..bbb7805e85d8 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -259,6 +259,7 @@ struct hda_codec {
unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
unsigned int forced_resume:1; /* forced resume for jack */
unsigned int no_stream_clean_at_suspend:1; /* do not clean streams at suspend */
+ unsigned int ctl_dev_id:1; /* old control element id build behaviour */
#ifdef CONFIG_PM
unsigned long power_on_acct;
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index a6872537724d..97f09acae302 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -123,7 +123,7 @@ void snd_hdac_device_exit(struct hdac_device *dev);
int snd_hdac_device_register(struct hdac_device *codec);
void snd_hdac_device_unregister(struct hdac_device *codec);
int snd_hdac_device_set_chip_name(struct hdac_device *codec, const char *name);
-int snd_hdac_codec_modalias(struct hdac_device *hdac, char *buf, size_t size);
+int snd_hdac_codec_modalias(const struct hdac_device *hdac, char *buf, size_t size);
int snd_hdac_refresh_widgets(struct hdac_device *codec);
@@ -575,7 +575,7 @@ void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev);
int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
unsigned int format_val);
-void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start);
+void snd_hdac_stream_start(struct hdac_stream *azx_dev);
void snd_hdac_stream_stop(struct hdac_stream *azx_dev);
void snd_hdac_stop_streams(struct hdac_bus *bus);
void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus);
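snd_hdac_stream_start() loses its fresh_start flag, so trigger paths now pass only the stream. A minimal sketch of an adjusted caller (function name hypothetical):

#include <sound/hdaudio.h>

static void my_trigger_start(struct hdac_stream *azx_dev)
{
	/* previously: snd_hdac_stream_start(azx_dev, true); */
	snd_hdac_stream_start(azx_dev);
}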
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 38590f1ae9ee..a3f3f3aa9e6e 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -69,6 +69,7 @@ struct asoc_simple_priv {
} *dai_props;
struct asoc_simple_jack hp_jack;
struct asoc_simple_jack mic_jack;
+ struct snd_soc_jack *aux_jacks;
struct snd_soc_dai_link *dai_link;
struct asoc_simple_dai *dais;
struct snd_soc_dai_link_component *dlcs;
@@ -187,6 +188,8 @@ int asoc_simple_parse_pin_switches(struct snd_soc_card *card,
int asoc_simple_init_jack(struct snd_soc_card *card,
struct asoc_simple_jack *sjack,
int is_hp, char *prefix, char *pin);
+int asoc_simple_init_aux_jacks(struct asoc_simple_priv *priv,
+ char *prefix);
int asoc_simple_init_priv(struct asoc_simple_priv *priv,
struct link_info *li);
int asoc_simple_remove(struct platform_device *pdev);
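A hedged sketch of wiring the new aux-jack helper into a card probe; the call site and the prefix string are assumptions, not taken from this patch:

static int my_card_probe(struct snd_soc_card *card)
{
	struct asoc_simple_priv *priv = snd_soc_card_get_drvdata(card);

	/* create jacks for any auxiliary components, e.g. amplifiers */
	return asoc_simple_init_aux_jacks(priv, "simple-audio-card,");
}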
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index c26ffb033777..3203d35bc8c1 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -98,6 +98,7 @@ struct snd_soc_component_driver {
int source, unsigned int freq_in, unsigned int freq_out);
int (*set_jack)(struct snd_soc_component *component,
struct snd_soc_jack *jack, void *data);
+ int (*get_jack_type)(struct snd_soc_component *component);
/* DT */
int (*of_xlate_dai_name)(struct snd_soc_component *component,
@@ -384,6 +385,7 @@ int snd_soc_component_set_pll(struct snd_soc_component *component, int pll_id,
unsigned int freq_out);
int snd_soc_component_set_jack(struct snd_soc_component *component,
struct snd_soc_jack *jack, void *data);
+int snd_soc_component_get_jack_type(struct snd_soc_component *component);
void snd_soc_component_seq_notifier(struct snd_soc_component *component,
enum snd_soc_dapm_type type, int subseq);
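The new get_jack_type callback lets a machine driver query the codec for the detected jack class via snd_soc_component_get_jack_type(). A minimal sketch, assuming a codec driver that caches the type in its private data (names hypothetical):

static int my_codec_get_jack_type(struct snd_soc_component *component)
{
	struct my_codec_priv *priv = snd_soc_component_get_drvdata(component);

	return priv->jack_type;		/* e.g. SND_JACK_HEADSET */
}

static const struct snd_soc_component_driver my_codec_component = {
	.get_jack_type	= my_codec_get_jack_type,
};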
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index ea7509672086..e3906ecda740 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -423,6 +423,16 @@ struct snd_soc_dai_driver {
int remove_order;
};
+/* for Playback/Capture */
+struct snd_soc_dai_stream {
+ struct snd_soc_dapm_widget *widget;
+
+ unsigned int active; /* usage count */
+ unsigned int tdm_mask; /* CODEC TDM slot masks and params (for fixup) */
+
+ void *dma_data; /* DAI DMA data */
+};
+
/*
* Digital Audio Interface runtime data.
*
@@ -437,14 +447,7 @@ struct snd_soc_dai {
struct snd_soc_dai_driver *driver;
/* DAI runtime info */
- unsigned int stream_active[SNDRV_PCM_STREAM_LAST + 1]; /* usage count */
-
- struct snd_soc_dapm_widget *playback_widget;
- struct snd_soc_dapm_widget *capture_widget;
-
- /* DAI DMA data */
- void *playback_dma_data;
- void *capture_dma_data;
+ struct snd_soc_dai_stream stream[SNDRV_PCM_STREAM_LAST + 1];
/* Symmetry data - only valid if symmetry is being enforced */
unsigned int rate;
@@ -454,10 +457,6 @@ struct snd_soc_dai {
/* parent platform/codec */
struct snd_soc_component *component;
- /* CODEC TDM slot masks and params (for fixup) */
- unsigned int tx_mask;
- unsigned int rx_mask;
-
struct list_head list;
/* function mark */
@@ -477,36 +476,59 @@ snd_soc_dai_get_pcm_stream(const struct snd_soc_dai *dai, int stream)
&dai->driver->playback : &dai->driver->capture;
}
+#define snd_soc_dai_get_widget_playback(dai) snd_soc_dai_get_widget(dai, SNDRV_PCM_STREAM_PLAYBACK)
+#define snd_soc_dai_get_widget_capture(dai) snd_soc_dai_get_widget(dai, SNDRV_PCM_STREAM_CAPTURE)
static inline
-struct snd_soc_dapm_widget *snd_soc_dai_get_widget(
- struct snd_soc_dai *dai, int stream)
+struct snd_soc_dapm_widget *snd_soc_dai_get_widget(struct snd_soc_dai *dai, int stream)
{
- return (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
- dai->playback_widget : dai->capture_widget;
+ return dai->stream[stream].widget;
}
-static inline void *snd_soc_dai_get_dma_data(const struct snd_soc_dai *dai,
- const struct snd_pcm_substream *ss)
+#define snd_soc_dai_set_widget_playback(dai, widget) snd_soc_dai_set_widget(dai, SNDRV_PCM_STREAM_PLAYBACK, widget)
+#define snd_soc_dai_set_widget_capture(dai, widget) snd_soc_dai_set_widget(dai, SNDRV_PCM_STREAM_CAPTURE, widget)
+static inline
+void snd_soc_dai_set_widget(struct snd_soc_dai *dai, int stream, struct snd_soc_dapm_widget *widget)
{
- return (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
- dai->playback_dma_data : dai->capture_dma_data;
+ dai->stream[stream].widget = widget;
}
-static inline void snd_soc_dai_set_dma_data(struct snd_soc_dai *dai,
- const struct snd_pcm_substream *ss,
- void *data)
+#define snd_soc_dai_dma_data_get_playback(dai) snd_soc_dai_dma_data_get(dai, SNDRV_PCM_STREAM_PLAYBACK)
+#define snd_soc_dai_dma_data_get_capture(dai) snd_soc_dai_dma_data_get(dai, SNDRV_PCM_STREAM_CAPTURE)
+#define snd_soc_dai_get_dma_data(dai, ss) snd_soc_dai_dma_data_get(dai, ss->stream)
+static inline void *snd_soc_dai_dma_data_get(const struct snd_soc_dai *dai, int stream)
{
- if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK)
- dai->playback_dma_data = data;
- else
- dai->capture_dma_data = data;
+ return dai->stream[stream].dma_data;
+}
+
+#define snd_soc_dai_dma_data_set_playback(dai, data) snd_soc_dai_dma_data_set(dai, SNDRV_PCM_STREAM_PLAYBACK, data)
+#define snd_soc_dai_dma_data_set_capture(dai, data) snd_soc_dai_dma_data_set(dai, SNDRV_PCM_STREAM_CAPTURE, data)
+#define snd_soc_dai_set_dma_data(dai, ss, data) snd_soc_dai_dma_data_set(dai, ss->stream, data)
+static inline void snd_soc_dai_dma_data_set(struct snd_soc_dai *dai, int stream, void *data)
+{
+ dai->stream[stream].dma_data = data;
}
-static inline void snd_soc_dai_init_dma_data(struct snd_soc_dai *dai,
- void *playback, void *capture)
+static inline void snd_soc_dai_init_dma_data(struct snd_soc_dai *dai, void *playback, void *capture)
{
- dai->playback_dma_data = playback;
- dai->capture_dma_data = capture;
+ snd_soc_dai_dma_data_set_playback(dai, playback);
+ snd_soc_dai_dma_data_set_capture(dai, capture);
+}
+
+static inline unsigned int snd_soc_dai_tdm_mask_get(struct snd_soc_dai *dai, int stream)
+{
+ return dai->stream[stream].tdm_mask;
+}
+
+static inline void snd_soc_dai_tdm_mask_set(struct snd_soc_dai *dai, int stream,
+ unsigned int tdm_mask)
+{
+ dai->stream[stream].tdm_mask = tdm_mask;
+}
+
+static inline unsigned int snd_soc_dai_stream_active(struct snd_soc_dai *dai, int stream)
+{
+ /* see snd_soc_dai_action() for setup */
+ return dai->stream[stream].active;
}
static inline void snd_soc_dai_set_drvdata(struct snd_soc_dai *dai,
@@ -561,10 +583,4 @@ static inline void *snd_soc_dai_get_stream(struct snd_soc_dai *dai,
return ERR_PTR(-ENOTSUPP);
}
-static inline unsigned int
-snd_soc_dai_stream_active(struct snd_soc_dai *dai, int stream)
-{
- return dai->stream_active[stream];
-}
-
#endif
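Per-stream DAI state now lives in dai->stream[] and is reached only through the accessors above. A hedged sketch of a DAI startup hook updated to the new API; my_get_dma_params() and the driver naming are hypothetical:

static int my_dai_startup(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct my_dma_params *prm = my_get_dma_params(dai, substream->stream);

	/* previously: dai->playback_dma_data / dai->capture_dma_data */
	snd_soc_dai_dma_data_set(dai, substream->stream, prm);
	return 0;
}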
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 77495e5988c1..64915ebd641e 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -16,6 +16,7 @@
#include <sound/asoc.h>
struct device;
+struct snd_pcm_substream;
struct snd_soc_pcm_runtime;
struct soc_enum;
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 2864aed72998..1e7d09556fe3 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -162,6 +162,8 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream);
int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
int event);
bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget, enum snd_soc_dapm_direction dir);
+int widget_in_list(struct snd_soc_dapm_widget_list *list,
+ struct snd_soc_dapm_widget *widget);
#define dpcm_be_dai_startup_rollback(fe, stream, last) \
dpcm_be_dai_stop(fe, stream, 0, last)
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index b4b896f83b94..f055c6917f6c 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -62,7 +62,7 @@ struct snd_soc_dobj {
enum snd_soc_dobj_type type;
unsigned int index; /* objects can belong in different groups */
struct list_head list;
- struct snd_soc_tplg_ops *ops;
+ int (*unload)(struct snd_soc_component *comp, struct snd_soc_dobj *dobj);
union {
struct snd_soc_dobj_control control;
struct snd_soc_dobj_widget widget;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 37bbfc8b45cb..e58b43b5da7c 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1052,6 +1052,12 @@ struct snd_soc_card {
#define for_each_card_widgets_safe(card, w, _w) \
list_for_each_entry_safe(w, _w, &card->widgets, list)
+
+static inline int snd_soc_card_is_instantiated(struct snd_soc_card *card)
+{
+ return card && card->instantiated;
+}
+
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
struct snd_soc_pcm_runtime {
struct device *dev;
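snd_soc_card_is_instantiated() folds the recurring "card && card->instantiated" test into one helper. A minimal sketch of a PM callback using it (function name hypothetical):

static int my_component_resume(struct device *dev)
{
	struct snd_soc_card *card = dev_get_drvdata(dev);

	if (!snd_soc_card_is_instantiated(card))
		return 0;	/* card not bound yet, nothing to resume */
	/* ... resume jacks, DAPM, etc. ... */
	return 0;
}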
diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h
index 622193be7ac4..d31349bf011d 100644
--- a/include/sound/sof/ipc4/header.h
+++ b/include/sound/sof/ipc4/header.h
@@ -185,6 +185,9 @@ enum sof_ipc4_pipeline_state {
#define SOF_IPC4_GLB_PIPE_STATE_MASK GENMASK(15, 0)
#define SOF_IPC4_GLB_PIPE_STATE(x) ((x) << SOF_IPC4_GLB_PIPE_STATE_SHIFT)
+/* pipeline set state IPC msg extension */
+#define SOF_IPC4_GLB_PIPE_STATE_EXT_MULTI BIT(0)
+
/* load library ipc msg */
#define SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID_SHIFT 16
#define SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(x) ((x) << SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID_SHIFT)
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 155c495b89ea..1f7fc1fc590c 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -4,50 +4,7 @@
#ifdef CONFIG_BPF_EVENTS
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_cpumask
-#define __get_cpumask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_sockaddr
-#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
-
-#undef __get_rel_dynamic_array
-#define __get_rel_dynamic_array(field) \
- ((void *)(&__entry->__rel_loc_##field) + \
- sizeof(__entry->__rel_loc_##field) + \
- (__entry->__rel_loc_##field & 0xffff))
-
-#undef __get_rel_dynamic_array_len
-#define __get_rel_dynamic_array_len(field) \
- ((__entry->__rel_loc_##field >> 16) & 0xffff)
-
-#undef __get_rel_str
-#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
-
-#undef __get_rel_bitmask
-#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_cpumask
-#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_sockaddr
-#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+#include "stages/stage6_event_callback.h"
#undef __perf_count
#define __perf_count(c) (c)
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 6b200059c2c5..a6b3a4e409f0 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -122,6 +122,64 @@ TRACE_EVENT(br_fdb_update,
__entry->flags)
);
+TRACE_EVENT(br_mdb_full,
+
+ TP_PROTO(const struct net_device *dev,
+ const struct br_ip *group),
+
+ TP_ARGS(dev, group),
+
+ TP_STRUCT__entry(
+ __string(dev, dev->name)
+ __field(int, af)
+ __field(u16, vid)
+ __array(__u8, src, 16)
+ __array(__u8, grp, 16)
+ __array(__u8, grpmac, ETH_ALEN) /* For af == 0. */
+ ),
+
+ TP_fast_assign(
+ struct in6_addr *in6;
+
+ __assign_str(dev, dev->name);
+ __entry->vid = group->vid;
+
+ if (!group->proto) {
+ __entry->af = 0;
+
+ memset(__entry->src, 0, sizeof(__entry->src));
+ memset(__entry->grp, 0, sizeof(__entry->grp));
+ memcpy(__entry->grpmac, group->dst.mac_addr, ETH_ALEN);
+ } else if (group->proto == htons(ETH_P_IP)) {
+ __entry->af = AF_INET;
+
+ in6 = (struct in6_addr *)__entry->src;
+ ipv6_addr_set_v4mapped(group->src.ip4, in6);
+
+ in6 = (struct in6_addr *)__entry->grp;
+ ipv6_addr_set_v4mapped(group->dst.ip4, in6);
+
+ memset(__entry->grpmac, 0, ETH_ALEN);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ __entry->af = AF_INET6;
+
+ in6 = (struct in6_addr *)__entry->src;
+ *in6 = group->src.ip6;
+
+ in6 = (struct in6_addr *)__entry->grp;
+ *in6 = group->dst.ip6;
+
+ memset(__entry->grpmac, 0, ETH_ALEN);
+#endif
+ }
+ ),
+
+ TP_printk("dev %s af %u src %pI6c grp %pI6c/%pM vid %u",
+ __get_str(dev), __entry->af, __entry->src, __entry->grp,
+ __entry->grpmac, __entry->vid)
+);
#endif /* _TRACE_BRIDGE_H */
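TRACE_EVENT() generates a matching trace_br_mdb_full() call for the bridge code to emit when the MDB table overflows. A minimal sketch of the call site; the wrapper function is hypothetical:

static void my_report_mdb_full(const struct net_device *dev,
			       const struct br_ip *group)
{
	trace_br_mdb_full(dev, group);
}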
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index 3d708dae1542..ef75ea606ab2 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -91,12 +91,38 @@ TRACE_EVENT(cma_alloc_start,
__entry->align)
);
-DEFINE_EVENT(cma_alloc_class, cma_alloc_finish,
+TRACE_EVENT(cma_alloc_finish,
TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
- unsigned long count, unsigned int align),
+ unsigned long count, unsigned int align, int errorno),
- TP_ARGS(name, pfn, page, count, align)
+ TP_ARGS(name, pfn, page, count, align, errorno),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(unsigned long, pfn)
+ __field(const struct page *, page)
+ __field(unsigned long, count)
+ __field(unsigned int, align)
+ __field(int, errorno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->pfn = pfn;
+ __entry->page = page;
+ __entry->count = count;
+ __entry->align = align;
+ __entry->errorno = errorno;
+ ),
+
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu align=%u errorno=%d",
+ __get_str(name),
+ __entry->pfn,
+ __entry->page,
+ __entry->count,
+ __entry->align,
+ __entry->errorno)
);
DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
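cma_alloc_finish is now a standalone event carrying the final error code, so the allocator passes its status along with the page. A hedged sketch of the call; the wrapper function is an assumption, the arguments mirror the prototype above:

static void my_trace_alloc_done(const char *name, unsigned long pfn,
				const struct page *page, unsigned long count,
				unsigned int align, int ret)
{
	trace_cma_alloc_finish(name, pfn, page, count, align, ret);
}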
diff --git a/include/trace/events/cxl.h b/include/trace/events/cxl.h
deleted file mode 100644
index ad085a2534ef..000000000000
--- a/include/trace/events/cxl.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM cxl
-
-#if !defined(_CXL_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _CXL_EVENTS_H
-
-#include <linux/tracepoint.h>
-
-#define CXL_HEADERLOG_SIZE SZ_512
-#define CXL_HEADERLOG_SIZE_U32 SZ_512 / sizeof(u32)
-
-#define CXL_RAS_UC_CACHE_DATA_PARITY BIT(0)
-#define CXL_RAS_UC_CACHE_ADDR_PARITY BIT(1)
-#define CXL_RAS_UC_CACHE_BE_PARITY BIT(2)
-#define CXL_RAS_UC_CACHE_DATA_ECC BIT(3)
-#define CXL_RAS_UC_MEM_DATA_PARITY BIT(4)
-#define CXL_RAS_UC_MEM_ADDR_PARITY BIT(5)
-#define CXL_RAS_UC_MEM_BE_PARITY BIT(6)
-#define CXL_RAS_UC_MEM_DATA_ECC BIT(7)
-#define CXL_RAS_UC_REINIT_THRESH BIT(8)
-#define CXL_RAS_UC_RSVD_ENCODE BIT(9)
-#define CXL_RAS_UC_POISON BIT(10)
-#define CXL_RAS_UC_RECV_OVERFLOW BIT(11)
-#define CXL_RAS_UC_INTERNAL_ERR BIT(14)
-#define CXL_RAS_UC_IDE_TX_ERR BIT(15)
-#define CXL_RAS_UC_IDE_RX_ERR BIT(16)
-
-#define show_uc_errs(status) __print_flags(status, " | ", \
- { CXL_RAS_UC_CACHE_DATA_PARITY, "Cache Data Parity Error" }, \
- { CXL_RAS_UC_CACHE_ADDR_PARITY, "Cache Address Parity Error" }, \
- { CXL_RAS_UC_CACHE_BE_PARITY, "Cache Byte Enable Parity Error" }, \
- { CXL_RAS_UC_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
- { CXL_RAS_UC_MEM_DATA_PARITY, "Memory Data Parity Error" }, \
- { CXL_RAS_UC_MEM_ADDR_PARITY, "Memory Address Parity Error" }, \
- { CXL_RAS_UC_MEM_BE_PARITY, "Memory Byte Enable Parity Error" }, \
- { CXL_RAS_UC_MEM_DATA_ECC, "Memory Data ECC Error" }, \
- { CXL_RAS_UC_REINIT_THRESH, "REINIT Threshold Hit" }, \
- { CXL_RAS_UC_RSVD_ENCODE, "Received Unrecognized Encoding" }, \
- { CXL_RAS_UC_POISON, "Received Poison From Peer" }, \
- { CXL_RAS_UC_RECV_OVERFLOW, "Receiver Overflow" }, \
- { CXL_RAS_UC_INTERNAL_ERR, "Component Specific Error" }, \
- { CXL_RAS_UC_IDE_TX_ERR, "IDE Tx Error" }, \
- { CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
-)
-
-TRACE_EVENT(cxl_aer_uncorrectable_error,
- TP_PROTO(const struct device *dev, u32 status, u32 fe, u32 *hl),
- TP_ARGS(dev, status, fe, hl),
- TP_STRUCT__entry(
- __string(dev_name, dev_name(dev))
- __field(u32, status)
- __field(u32, first_error)
- __array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
- ),
- TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
- __entry->status = status;
- __entry->first_error = fe;
- /*
- * Embed the 512B headerlog data for user app retrieval and
- * parsing, but no need to print this in the trace buffer.
- */
- memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
- ),
- TP_printk("%s: status: '%s' first_error: '%s'",
- __get_str(dev_name),
- show_uc_errs(__entry->status),
- show_uc_errs(__entry->first_error)
- )
-);
-
-#define CXL_RAS_CE_CACHE_DATA_ECC BIT(0)
-#define CXL_RAS_CE_MEM_DATA_ECC BIT(1)
-#define CXL_RAS_CE_CRC_THRESH BIT(2)
-#define CLX_RAS_CE_RETRY_THRESH BIT(3)
-#define CXL_RAS_CE_CACHE_POISON BIT(4)
-#define CXL_RAS_CE_MEM_POISON BIT(5)
-#define CXL_RAS_CE_PHYS_LAYER_ERR BIT(6)
-
-#define show_ce_errs(status) __print_flags(status, " | ", \
- { CXL_RAS_CE_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
- { CXL_RAS_CE_MEM_DATA_ECC, "Memory Data ECC Error" }, \
- { CXL_RAS_CE_CRC_THRESH, "CRC Threshold Hit" }, \
- { CLX_RAS_CE_RETRY_THRESH, "Retry Threshold" }, \
- { CXL_RAS_CE_CACHE_POISON, "Received Cache Poison From Peer" }, \
- { CXL_RAS_CE_MEM_POISON, "Received Memory Poison From Peer" }, \
- { CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
-)
-
-TRACE_EVENT(cxl_aer_correctable_error,
- TP_PROTO(const struct device *dev, u32 status),
- TP_ARGS(dev, status),
- TP_STRUCT__entry(
- __string(dev_name, dev_name(dev))
- __field(u32, status)
- ),
- TP_fast_assign(
- __assign_str(dev_name, dev_name(dev));
- __entry->status = status;
- ),
- TP_printk("%s: status: '%s'",
- __get_str(dev_name), show_ce_errs(__entry->status)
- )
-);
-
-#endif /* _CXL_EVENTS_H */
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE cxl
-#include <trace/define_trace.h>
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 24969184c534..77ff7cfc6049 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -88,7 +88,7 @@ TRACE_EVENT(devlink_health_report,
__string(bus_name, devlink_to_dev(devlink)->bus->name)
__string(dev_name, dev_name(devlink_to_dev(devlink)))
__string(driver_name, devlink_to_dev(devlink)->driver->name)
- __string(reporter_name, msg)
+ __string(reporter_name, reporter_name)
__string(msg, msg)
),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 31d994e6b4ca..1322d34a5dfc 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -569,10 +569,10 @@ TRACE_EVENT(f2fs_file_write_iter,
);
TRACE_EVENT(f2fs_map_blocks,
- TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map,
- int create, int flag, int ret),
+ TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int flag,
+ int ret),
- TP_ARGS(inode, map, create, flag, ret),
+ TP_ARGS(inode, map, flag, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -584,7 +584,6 @@ TRACE_EVENT(f2fs_map_blocks,
__field(int, m_seg_type)
__field(bool, m_may_create)
__field(bool, m_multidev_dio)
- __field(int, create)
__field(int, flag)
__field(int, ret)
),
@@ -599,7 +598,6 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_seg_type = map->m_seg_type;
__entry->m_may_create = map->m_may_create;
__entry->m_multidev_dio = map->m_multidev_dio;
- __entry->create = create;
__entry->flag = flag;
__entry->ret = ret;
),
@@ -607,7 +605,7 @@ TRACE_EVENT(f2fs_map_blocks,
TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
"start blkaddr = 0x%llx, len = 0x%llx, flags = %u, "
"seg_type = %d, may_create = %d, multidevice = %d, "
- "create = %d, flag = %d, err = %d",
+ "flag = %d, err = %d",
show_dev_ino(__entry),
(unsigned long long)__entry->m_lblk,
(unsigned long long)__entry->m_pblk,
@@ -616,7 +614,6 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_seg_type,
__entry->m_may_create,
__entry->m_multidev_dio,
- __entry->create,
__entry->flag,
__entry->ret)
);
@@ -1293,6 +1290,43 @@ DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
TP_ARGS(page, type)
);
+TRACE_EVENT(f2fs_replace_atomic_write_block,
+
+ TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index,
+ block_t old_addr, block_t new_addr, bool recovery),
+
+ TP_ARGS(inode, cow_inode, index, old_addr, new_addr, recovery),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(ino_t, cow_ino)
+ __field(pgoff_t, index)
+ __field(block_t, old_addr)
+ __field(block_t, new_addr)
+ __field(bool, recovery)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->cow_ino = cow_inode->i_ino;
+ __entry->index = index;
+ __entry->old_addr = old_addr;
+ __entry->new_addr = new_addr;
+ __entry->recovery = recovery;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, cow_ino = %lu, index = %lu, "
+ "old_addr = 0x%llx, new_addr = 0x%llx, recovery = %d",
+ show_dev_ino(__entry),
+ __entry->cow_ino,
+ (unsigned long)__entry->index,
+ (unsigned long long)__entry->old_addr,
+ (unsigned long long)__entry->new_addr,
+ __entry->recovery)
+);
+
TRACE_EVENT(f2fs_filemap_fault,
TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
@@ -1975,7 +2009,7 @@ TRACE_EVENT(f2fs_iostat,
__entry->fs_cdrio = iostat[FS_CDATA_READ_IO];
__entry->fs_nrio = iostat[FS_NODE_READ_IO];
__entry->fs_mrio = iostat[FS_META_READ_IO];
- __entry->fs_discard = iostat[FS_DISCARD];
+ __entry->fs_discard = iostat[FS_DISCARD_IO];
),
TP_printk("dev = (%d,%d), "
@@ -2048,33 +2082,33 @@ TRACE_EVENT(f2fs_iostat_latency,
TP_fast_assign(
__entry->dev = sbi->sb->s_dev;
- __entry->d_rd_peak = iostat_lat[0][DATA].peak_lat;
- __entry->d_rd_avg = iostat_lat[0][DATA].avg_lat;
- __entry->d_rd_cnt = iostat_lat[0][DATA].cnt;
- __entry->n_rd_peak = iostat_lat[0][NODE].peak_lat;
- __entry->n_rd_avg = iostat_lat[0][NODE].avg_lat;
- __entry->n_rd_cnt = iostat_lat[0][NODE].cnt;
- __entry->m_rd_peak = iostat_lat[0][META].peak_lat;
- __entry->m_rd_avg = iostat_lat[0][META].avg_lat;
- __entry->m_rd_cnt = iostat_lat[0][META].cnt;
- __entry->d_wr_s_peak = iostat_lat[1][DATA].peak_lat;
- __entry->d_wr_s_avg = iostat_lat[1][DATA].avg_lat;
- __entry->d_wr_s_cnt = iostat_lat[1][DATA].cnt;
- __entry->n_wr_s_peak = iostat_lat[1][NODE].peak_lat;
- __entry->n_wr_s_avg = iostat_lat[1][NODE].avg_lat;
- __entry->n_wr_s_cnt = iostat_lat[1][NODE].cnt;
- __entry->m_wr_s_peak = iostat_lat[1][META].peak_lat;
- __entry->m_wr_s_avg = iostat_lat[1][META].avg_lat;
- __entry->m_wr_s_cnt = iostat_lat[1][META].cnt;
- __entry->d_wr_as_peak = iostat_lat[2][DATA].peak_lat;
- __entry->d_wr_as_avg = iostat_lat[2][DATA].avg_lat;
- __entry->d_wr_as_cnt = iostat_lat[2][DATA].cnt;
- __entry->n_wr_as_peak = iostat_lat[2][NODE].peak_lat;
- __entry->n_wr_as_avg = iostat_lat[2][NODE].avg_lat;
- __entry->n_wr_as_cnt = iostat_lat[2][NODE].cnt;
- __entry->m_wr_as_peak = iostat_lat[2][META].peak_lat;
- __entry->m_wr_as_avg = iostat_lat[2][META].avg_lat;
- __entry->m_wr_as_cnt = iostat_lat[2][META].cnt;
+ __entry->d_rd_peak = iostat_lat[READ_IO][DATA].peak_lat;
+ __entry->d_rd_avg = iostat_lat[READ_IO][DATA].avg_lat;
+ __entry->d_rd_cnt = iostat_lat[READ_IO][DATA].cnt;
+ __entry->n_rd_peak = iostat_lat[READ_IO][NODE].peak_lat;
+ __entry->n_rd_avg = iostat_lat[READ_IO][NODE].avg_lat;
+ __entry->n_rd_cnt = iostat_lat[READ_IO][NODE].cnt;
+ __entry->m_rd_peak = iostat_lat[READ_IO][META].peak_lat;
+ __entry->m_rd_avg = iostat_lat[READ_IO][META].avg_lat;
+ __entry->m_rd_cnt = iostat_lat[READ_IO][META].cnt;
+ __entry->d_wr_s_peak = iostat_lat[WRITE_SYNC_IO][DATA].peak_lat;
+ __entry->d_wr_s_avg = iostat_lat[WRITE_SYNC_IO][DATA].avg_lat;
+ __entry->d_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][DATA].cnt;
+ __entry->n_wr_s_peak = iostat_lat[WRITE_SYNC_IO][NODE].peak_lat;
+ __entry->n_wr_s_avg = iostat_lat[WRITE_SYNC_IO][NODE].avg_lat;
+ __entry->n_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][NODE].cnt;
+ __entry->m_wr_s_peak = iostat_lat[WRITE_SYNC_IO][META].peak_lat;
+ __entry->m_wr_s_avg = iostat_lat[WRITE_SYNC_IO][META].avg_lat;
+ __entry->m_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][META].cnt;
+ __entry->d_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][DATA].peak_lat;
+ __entry->d_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][DATA].avg_lat;
+ __entry->d_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][DATA].cnt;
+ __entry->n_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][NODE].peak_lat;
+ __entry->n_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][NODE].avg_lat;
+ __entry->n_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][NODE].cnt;
+ __entry->m_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][META].peak_lat;
+ __entry->m_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][META].avg_lat;
+ __entry->m_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][META].cnt;
),
TP_printk("dev = (%d,%d), "
diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h
index f05c5fa668a2..951643e6a7a9 100644
--- a/include/trace/events/habanalabs.h
+++ b/include/trace/events/habanalabs.h
@@ -87,6 +87,81 @@ DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_free,
TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
TP_ARGS(dev, cpu_addr, dma_addr, size, caller));
+DECLARE_EVENT_CLASS(habanalabs_comms_template,
+ TP_PROTO(struct device *dev, char *op_str),
+
+ TP_ARGS(dev, op_str),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(char *, op_str)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->op_str = op_str;
+ ),
+
+ TP_printk("%s: cms: %s",
+ __get_str(dname),
+ __entry->op_str)
+);
+
+DEFINE_EVENT(habanalabs_comms_template, habanalabs_comms_protocol_cmd,
+ TP_PROTO(struct device *dev, char *op_str),
+ TP_ARGS(dev, op_str));
+
+DEFINE_EVENT(habanalabs_comms_template, habanalabs_comms_send_cmd,
+ TP_PROTO(struct device *dev, char *op_str),
+ TP_ARGS(dev, op_str));
+
+DEFINE_EVENT(habanalabs_comms_template, habanalabs_comms_wait_status,
+ TP_PROTO(struct device *dev, char *op_str),
+ TP_ARGS(dev, op_str));
+
+DEFINE_EVENT(habanalabs_comms_template, habanalabs_comms_wait_status_done,
+ TP_PROTO(struct device *dev, char *op_str),
+ TP_ARGS(dev, op_str));
+
+DECLARE_EVENT_CLASS(habanalabs_reg_access_template,
+ TP_PROTO(struct device *dev, u32 addr, u32 val),
+
+ TP_ARGS(dev, addr, val),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u32, addr)
+ __field(u32, val)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->addr = addr;
+ __entry->val = val;
+ ),
+
+ TP_printk("%s: addr: %#x, val: %#x",
+ __get_str(dname),
+ __entry->addr,
+ __entry->val)
+);
+
+DEFINE_EVENT(habanalabs_reg_access_template, habanalabs_rreg32,
+ TP_PROTO(struct device *dev, u32 addr, u32 val),
+ TP_ARGS(dev, addr, val));
+
+DEFINE_EVENT(habanalabs_reg_access_template, habanalabs_wreg32,
+ TP_PROTO(struct device *dev, u32 addr, u32 val),
+ TP_ARGS(dev, addr, val));
+
+DEFINE_EVENT(habanalabs_reg_access_template, habanalabs_elbi_read,
+ TP_PROTO(struct device *dev, u32 addr, u32 val),
+ TP_ARGS(dev, addr, val));
+
+DEFINE_EVENT(habanalabs_reg_access_template, habanalabs_elbi_write,
+ TP_PROTO(struct device *dev, u32 addr, u32 val),
+ TP_ARGS(dev, addr, val));
+
#endif /* if !defined(_TRACE_HABANALABS_H) || defined(TRACE_HEADER_MULTI_READ) */
/* This part must be outside protection */
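Each DEFINE_EVENT() above produces a trace_<event>() helper with the TP_PROTO signature of its class. A hedged sketch of how a register-read path could emit the new habanalabs_rreg32 event (the wrapper below is illustrative, not part of the patch):

static u32 hl_traced_rreg32(struct device *dev, void __iomem *regs, u32 addr)
{
	u32 val = readl(regs + addr);

	/* Generated by DEFINE_EVENT(habanalabs_reg_access_template, ...) */
	trace_habanalabs_rreg32(dev, addr, val);
	return val;
}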
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 29096fe12623..70743db1fb75 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -76,13 +76,6 @@ DEFINE_EVENT(iommu_device_event, attach_device_to_domain,
TP_ARGS(dev)
);
-DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
-
- TP_PROTO(struct device *dev),
-
- TP_ARGS(dev)
-);
-
TRACE_EVENT(map,
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 412b5a46374c..9db52bc4ce19 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -31,7 +31,6 @@
gfpflag_string(__GFP_HIGHMEM), \
gfpflag_string(GFP_DMA32), \
gfpflag_string(__GFP_HIGH), \
- gfpflag_string(__GFP_ATOMIC), \
gfpflag_string(__GFP_IO), \
gfpflag_string(__GFP_FS), \
gfpflag_string(__GFP_NOWARN), \
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index 3f121eed369e..ba2d96a1bc2f 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -206,8 +206,30 @@ DECLARE_EVENT_CLASS(rpcgss_svc_gssapi_class,
), \
TP_ARGS(rqstp, maj_stat))
+DEFINE_SVC_GSSAPI_EVENT(wrap);
DEFINE_SVC_GSSAPI_EVENT(unwrap);
DEFINE_SVC_GSSAPI_EVENT(mic);
+DEFINE_SVC_GSSAPI_EVENT(get_mic);
+
+TRACE_EVENT(rpcgss_svc_wrap_failed,
+ TP_PROTO(
+ const struct svc_rqst *rqstp
+ ),
+
+ TP_ARGS(rqstp),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
+);
TRACE_EVENT(rpcgss_svc_unwrap_failed,
TP_PROTO(
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 283db0ea3db4..4c53a5ef6257 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -163,7 +163,7 @@
EM(rxrpc_local_put_for_use, "PUT for-use ") \
EM(rxrpc_local_put_kill_conn, "PUT conn-kil") \
EM(rxrpc_local_put_peer, "PUT peer ") \
- EM(rxrpc_local_put_prealloc_conn, "PUT conn-pre") \
+ EM(rxrpc_local_put_prealloc_peer, "PUT peer-pre") \
EM(rxrpc_local_put_release_sock, "PUT rel-sock") \
EM(rxrpc_local_stop, "STOP ") \
EM(rxrpc_local_stopped, "STOPPED ") \
@@ -318,6 +318,7 @@
EM(rxrpc_recvmsg_return, "RETN") \
EM(rxrpc_recvmsg_terminal, "TERM") \
EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \
+ EM(rxrpc_recvmsg_unqueue, "UNQU") \
E_(rxrpc_recvmsg_wait, "WAIT")
#define rxrpc_rtt_tx_traces \
@@ -360,11 +361,12 @@
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
EM(rxrpc_propose_ack_input_data_hole, "DataInH") \
- EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
+ EM(rxrpc_propose_ack_ping_for_old_rtt, "OldRtt ") \
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
+ EM(rxrpc_propose_ack_ping_for_rtt, "Rtt ") \
EM(rxrpc_propose_ack_processing_op, "ProcOp ") \
EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \
EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \
@@ -421,6 +423,13 @@
EM(RXRPC_ACK_IDLE, "IDL") \
E_(RXRPC_ACK__INVALID, "-?-")
+#define rxrpc_sack_traces \
+ EM(rxrpc_sack_advance, "ADV") \
+ EM(rxrpc_sack_fill, "FIL") \
+ EM(rxrpc_sack_nack, "NAK") \
+ EM(rxrpc_sack_none, "---") \
+ E_(rxrpc_sack_oos, "OOS")
+
#define rxrpc_completions \
EM(RXRPC_CALL_SUCCEEDED, "Succeeded") \
EM(RXRPC_CALL_REMOTELY_ABORTED, "RemoteAbort") \
@@ -496,6 +505,7 @@ enum rxrpc_recvmsg_trace { rxrpc_recvmsg_traces } __mode(byte);
enum rxrpc_req_ack_trace { rxrpc_req_ack_traces } __mode(byte);
enum rxrpc_rtt_rx_trace { rxrpc_rtt_rx_traces } __mode(byte);
enum rxrpc_rtt_tx_trace { rxrpc_rtt_tx_traces } __mode(byte);
+enum rxrpc_sack_trace { rxrpc_sack_traces } __mode(byte);
enum rxrpc_skb_trace { rxrpc_skb_traces } __mode(byte);
enum rxrpc_timer_trace { rxrpc_timer_traces } __mode(byte);
enum rxrpc_tx_point { rxrpc_tx_points } __mode(byte);
@@ -530,6 +540,7 @@ rxrpc_recvmsg_traces;
rxrpc_req_ack_traces;
rxrpc_rtt_rx_traces;
rxrpc_rtt_tx_traces;
+rxrpc_sack_traces;
rxrpc_skb_traces;
rxrpc_timer_traces;
rxrpc_tx_points;
@@ -552,10 +563,10 @@ TRACE_EVENT(rxrpc_local,
TP_ARGS(local_debug_id, op, ref, usage),
TP_STRUCT__entry(
- __field(unsigned int, local )
- __field(int, op )
- __field(int, ref )
- __field(int, usage )
+ __field(unsigned int, local)
+ __field(int, op)
+ __field(int, ref)
+ __field(int, usage)
),
TP_fast_assign(
@@ -578,9 +589,9 @@ TRACE_EVENT(rxrpc_peer,
TP_ARGS(peer_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, peer )
- __field(int, ref )
- __field(enum rxrpc_peer_trace, why )
+ __field(unsigned int, peer)
+ __field(int, ref)
+ __field(enum rxrpc_peer_trace, why)
),
TP_fast_assign(
@@ -601,9 +612,9 @@ TRACE_EVENT(rxrpc_bundle,
TP_ARGS(bundle_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, bundle )
- __field(int, ref )
- __field(int, why )
+ __field(unsigned int, bundle)
+ __field(int, ref)
+ __field(int, why)
),
TP_fast_assign(
@@ -624,9 +635,9 @@ TRACE_EVENT(rxrpc_conn,
TP_ARGS(conn_debug_id, ref, why),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(int, ref )
- __field(int, why )
+ __field(unsigned int, conn)
+ __field(int, ref)
+ __field(int, why)
),
TP_fast_assign(
@@ -648,11 +659,11 @@ TRACE_EVENT(rxrpc_client,
TP_ARGS(conn, channel, op),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(u32, cid )
- __field(int, channel )
- __field(int, usage )
- __field(enum rxrpc_client_trace, op )
+ __field(unsigned int, conn)
+ __field(u32, cid)
+ __field(int, channel)
+ __field(int, usage)
+ __field(enum rxrpc_client_trace, op)
),
TP_fast_assign(
@@ -678,10 +689,10 @@ TRACE_EVENT(rxrpc_call,
TP_ARGS(call_debug_id, ref, aux, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, ref )
- __field(int, why )
- __field(unsigned long, aux )
+ __field(unsigned int, call)
+ __field(int, ref)
+ __field(int, why)
+ __field(unsigned long, aux)
),
TP_fast_assign(
@@ -705,10 +716,10 @@ TRACE_EVENT(rxrpc_skb,
TP_ARGS(skb, usage, mod_count, why),
TP_STRUCT__entry(
- __field(struct sk_buff *, skb )
- __field(int, usage )
- __field(int, mod_count )
- __field(enum rxrpc_skb_trace, why )
+ __field(struct sk_buff *, skb)
+ __field(int, usage)
+ __field(int, mod_count)
+ __field(enum rxrpc_skb_trace, why)
),
TP_fast_assign(
@@ -731,7 +742,7 @@ TRACE_EVENT(rxrpc_rx_packet,
TP_ARGS(sp),
TP_STRUCT__entry(
- __field_struct(struct rxrpc_host_header, hdr )
+ __field_struct(struct rxrpc_host_header, hdr)
),
TP_fast_assign(
@@ -742,9 +753,8 @@ TRACE_EVENT(rxrpc_rx_packet,
__entry->hdr.epoch, __entry->hdr.cid,
__entry->hdr.callNumber, __entry->hdr.serviceId,
__entry->hdr.serial, __entry->hdr.seq,
- __entry->hdr.type, __entry->hdr.flags,
- __entry->hdr.type <= 15 ?
- __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK")
+ __entry->hdr.securityIndex, __entry->hdr.flags,
+ __print_symbolic(__entry->hdr.type, rxrpc_pkts))
);
TRACE_EVENT(rxrpc_rx_done,
@@ -753,8 +763,8 @@ TRACE_EVENT(rxrpc_rx_done,
TP_ARGS(result, abort_code),
TP_STRUCT__entry(
- __field(int, result )
- __field(int, abort_code )
+ __field(int, result)
+ __field(int, abort_code)
),
TP_fast_assign(
@@ -772,13 +782,13 @@ TRACE_EVENT(rxrpc_abort,
TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error),
TP_STRUCT__entry(
- __field(unsigned int, call_nr )
- __field(enum rxrpc_abort_reason, why )
- __field(u32, cid )
- __field(u32, call_id )
- __field(rxrpc_seq_t, seq )
- __field(int, abort_code )
- __field(int, error )
+ __field(unsigned int, call_nr)
+ __field(enum rxrpc_abort_reason, why)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(rxrpc_seq_t, seq)
+ __field(int, abort_code)
+ __field(int, error)
),
TP_fast_assign(
@@ -804,10 +814,10 @@ TRACE_EVENT(rxrpc_call_complete,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_call_completion, compl )
- __field(int, error )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(enum rxrpc_call_completion, compl)
+ __field(int, error)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -830,13 +840,13 @@ TRACE_EVENT(rxrpc_txqueue,
TP_ARGS(call, why),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_txqueue_trace, why )
- __field(rxrpc_seq_t, acks_hard_ack )
- __field(rxrpc_seq_t, tx_bottom )
- __field(rxrpc_seq_t, tx_top )
- __field(rxrpc_seq_t, tx_prepared )
- __field(int, tx_winsize )
+ __field(unsigned int, call)
+ __field(enum rxrpc_txqueue_trace, why)
+ __field(rxrpc_seq_t, acks_hard_ack)
+ __field(rxrpc_seq_t, tx_bottom)
+ __field(rxrpc_seq_t, tx_top)
+ __field(rxrpc_seq_t, tx_prepared)
+ __field(int, tx_winsize)
),
TP_fast_assign(
@@ -867,10 +877,10 @@ TRACE_EVENT(rxrpc_rx_data,
TP_ARGS(call, seq, serial, flags),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_serial_t, serial )
- __field(u8, flags )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, flags)
),
TP_fast_assign(
@@ -895,13 +905,13 @@ TRACE_EVENT(rxrpc_rx_ack,
TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_serial_t, ack_serial )
- __field(rxrpc_seq_t, first )
- __field(rxrpc_seq_t, prev )
- __field(u8, reason )
- __field(u8, n_acks )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(rxrpc_seq_t, first)
+ __field(rxrpc_seq_t, prev)
+ __field(u8, reason)
+ __field(u8, n_acks)
),
TP_fast_assign(
@@ -931,9 +941,9 @@ TRACE_EVENT(rxrpc_rx_abort,
TP_ARGS(call, serial, abort_code),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -955,11 +965,11 @@ TRACE_EVENT(rxrpc_rx_challenge,
TP_ARGS(conn, serial, version, nonce, min_level),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(rxrpc_serial_t, serial )
- __field(u32, version )
- __field(u32, nonce )
- __field(u32, min_level )
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, version)
+ __field(u32, nonce)
+ __field(u32, min_level)
),
TP_fast_assign(
@@ -985,11 +995,11 @@ TRACE_EVENT(rxrpc_rx_response,
TP_ARGS(conn, serial, version, kvno, ticket_len),
TP_STRUCT__entry(
- __field(unsigned int, conn )
- __field(rxrpc_serial_t, serial )
- __field(u32, version )
- __field(u32, kvno )
- __field(u32, ticket_len )
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, version)
+ __field(u32, kvno)
+ __field(u32, ticket_len)
),
TP_fast_assign(
@@ -1015,10 +1025,10 @@ TRACE_EVENT(rxrpc_rx_rwind_change,
TP_ARGS(call, serial, rwind, wake),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(u32, rwind )
- __field(bool, wake )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, rwind)
+ __field(bool, wake)
),
TP_fast_assign(
@@ -1042,9 +1052,9 @@ TRACE_EVENT(rxrpc_tx_packet,
TP_ARGS(call_id, whdr, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_tx_point, where )
- __field_struct(struct rxrpc_wire_header, whdr )
+ __field(unsigned int, call)
+ __field(enum rxrpc_tx_point, where)
+ __field_struct(struct rxrpc_wire_header, whdr)
),
TP_fast_assign(
@@ -1074,14 +1084,14 @@ TRACE_EVENT(rxrpc_tx_data,
TP_ARGS(call, seq, serial, flags, retrans, lose),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_serial_t, serial )
- __field(u32, cid )
- __field(u32, call_id )
- __field(u8, flags )
- __field(bool, retrans )
- __field(bool, lose )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(u8, flags)
+ __field(bool, retrans)
+ __field(bool, lose)
),
TP_fast_assign(
@@ -1109,17 +1119,18 @@ TRACE_EVENT(rxrpc_tx_data,
TRACE_EVENT(rxrpc_tx_ack,
TP_PROTO(unsigned int call, rxrpc_serial_t serial,
rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
- u8 reason, u8 n_acks),
+ u8 reason, u8 n_acks, u16 rwind),
- TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
+ TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_seq_t, ack_first )
- __field(rxrpc_serial_t, ack_serial )
- __field(u8, reason )
- __field(u8, n_acks )
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, ack_first)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(u8, reason)
+ __field(u8, n_acks)
+ __field(u16, rwind)
),
TP_fast_assign(
@@ -1129,15 +1140,17 @@ TRACE_EVENT(rxrpc_tx_ack,
__entry->ack_serial = ack_serial;
__entry->reason = reason;
__entry->n_acks = n_acks;
+ __entry->rwind = rwind;
),
- TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u",
+ TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
__entry->ack_first,
__entry->ack_serial,
- __entry->n_acks)
+ __entry->n_acks,
+ __entry->rwind)
);
TRACE_EVENT(rxrpc_receive,
@@ -1147,11 +1160,12 @@ TRACE_EVENT(rxrpc_receive,
TP_ARGS(call, why, serial, seq),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_receive_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(rxrpc_seq_t, seq )
- __field(u64, window )
+ __field(unsigned int, call)
+ __field(enum rxrpc_receive_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_seq_t, window)
+ __field(rxrpc_seq_t, wtop)
),
TP_fast_assign(
@@ -1159,7 +1173,8 @@ TRACE_EVENT(rxrpc_receive,
__entry->why = why;
__entry->serial = serial;
__entry->seq = seq;
- __entry->window = atomic64_read(&call->ackr_window);
+ __entry->window = call->ackr_window;
+ __entry->wtop = call->ackr_wtop;
),
TP_printk("c=%08x %s r=%08x q=%08x w=%08x-%08x",
@@ -1167,8 +1182,8 @@ TRACE_EVENT(rxrpc_receive,
__print_symbolic(__entry->why, rxrpc_receive_traces),
__entry->serial,
__entry->seq,
- lower_32_bits(__entry->window),
- upper_32_bits(__entry->window))
+ __entry->window,
+ __entry->wtop)
);
TRACE_EVENT(rxrpc_recvmsg,
@@ -1178,9 +1193,9 @@ TRACE_EVENT(rxrpc_recvmsg,
TP_ARGS(call_debug_id, why, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_recvmsg_trace, why )
- __field(int, ret )
+ __field(unsigned int, call)
+ __field(enum rxrpc_recvmsg_trace, why)
+ __field(int, ret)
),
TP_fast_assign(
@@ -1203,12 +1218,12 @@ TRACE_EVENT(rxrpc_recvdata,
TP_ARGS(call, why, seq, offset, len, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_recvmsg_trace, why )
- __field(rxrpc_seq_t, seq )
- __field(unsigned int, offset )
- __field(unsigned int, len )
- __field(int, ret )
+ __field(unsigned int, call)
+ __field(enum rxrpc_recvmsg_trace, why)
+ __field(rxrpc_seq_t, seq)
+ __field(unsigned int, offset)
+ __field(unsigned int, len)
+ __field(int, ret)
),
TP_fast_assign(
@@ -1236,10 +1251,10 @@ TRACE_EVENT(rxrpc_rtt_tx,
TP_ARGS(call, why, slot, send_serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_rtt_tx_trace, why )
- __field(int, slot )
- __field(rxrpc_serial_t, send_serial )
+ __field(unsigned int, call)
+ __field(enum rxrpc_rtt_tx_trace, why)
+ __field(int, slot)
+ __field(rxrpc_serial_t, send_serial)
),
TP_fast_assign(
@@ -1265,13 +1280,13 @@ TRACE_EVENT(rxrpc_rtt_rx,
TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_rtt_rx_trace, why )
- __field(int, slot )
- __field(rxrpc_serial_t, send_serial )
- __field(rxrpc_serial_t, resp_serial )
- __field(u32, rtt )
- __field(u32, rto )
+ __field(unsigned int, call)
+ __field(enum rxrpc_rtt_rx_trace, why)
+ __field(int, slot)
+ __field(rxrpc_serial_t, send_serial)
+ __field(rxrpc_serial_t, resp_serial)
+ __field(u32, rtt)
+ __field(u32, rto)
),
TP_fast_assign(
@@ -1301,17 +1316,17 @@ TRACE_EVENT(rxrpc_timer,
TP_ARGS(call, why, now),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_timer_trace, why )
- __field(long, now )
- __field(long, ack_at )
- __field(long, ack_lost_at )
- __field(long, resend_at )
- __field(long, ping_at )
- __field(long, expect_rx_by )
- __field(long, expect_req_by )
- __field(long, expect_term_by )
- __field(long, timer )
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ __field(long, now)
+ __field(long, ack_at)
+ __field(long, ack_lost_at)
+ __field(long, resend_at)
+ __field(long, ping_at)
+ __field(long, expect_rx_by)
+ __field(long, expect_req_by)
+ __field(long, expect_term_by)
+ __field(long, timer)
),
TP_fast_assign(
@@ -1345,16 +1360,16 @@ TRACE_EVENT(rxrpc_timer_expired,
TP_ARGS(call, now),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(long, now )
- __field(long, ack_at )
- __field(long, ack_lost_at )
- __field(long, resend_at )
- __field(long, ping_at )
- __field(long, expect_rx_by )
- __field(long, expect_req_by )
- __field(long, expect_term_by )
- __field(long, timer )
+ __field(unsigned int, call)
+ __field(long, now)
+ __field(long, ack_at)
+ __field(long, ack_lost_at)
+ __field(long, resend_at)
+ __field(long, ping_at)
+ __field(long, expect_rx_by)
+ __field(long, expect_req_by)
+ __field(long, expect_term_by)
+ __field(long, timer)
),
TP_fast_assign(
@@ -1386,7 +1401,7 @@ TRACE_EVENT(rxrpc_rx_lose,
TP_ARGS(sp),
TP_STRUCT__entry(
- __field_struct(struct rxrpc_host_header, hdr )
+ __field_struct(struct rxrpc_host_header, hdr)
),
TP_fast_assign(
@@ -1409,10 +1424,10 @@ TRACE_EVENT(rxrpc_propose_ack,
TP_ARGS(call, why, ack_reason, serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_propose_ack_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(u8, ack_reason )
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
),
TP_fast_assign(
@@ -1436,10 +1451,10 @@ TRACE_EVENT(rxrpc_send_ack,
TP_ARGS(call, why, ack_reason, serial),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_propose_ack_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(u8, ack_reason )
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
),
TP_fast_assign(
@@ -1463,11 +1478,11 @@ TRACE_EVENT(rxrpc_drop_ack,
TP_ARGS(call, why, ack_reason, serial, nobuf),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_propose_ack_trace, why )
- __field(rxrpc_serial_t, serial )
- __field(u8, ack_reason )
- __field(bool, nobuf )
+ __field(unsigned int, call)
+ __field(enum rxrpc_propose_ack_trace, why)
+ __field(rxrpc_serial_t, serial)
+ __field(u8, ack_reason)
+ __field(bool, nobuf)
),
TP_fast_assign(
@@ -1491,9 +1506,9 @@ TRACE_EVENT(rxrpc_retransmit,
TP_ARGS(call, seq, expiry),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(s64, expiry )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(s64, expiry)
),
TP_fast_assign(
@@ -1515,13 +1530,13 @@ TRACE_EVENT(rxrpc_congest,
TP_ARGS(call, summary, ack_serial, change),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_congest_change, change )
- __field(rxrpc_seq_t, hard_ack )
- __field(rxrpc_seq_t, top )
- __field(rxrpc_seq_t, lowest_nak )
- __field(rxrpc_serial_t, ack_serial )
- __field_struct(struct rxrpc_ack_summary, sum )
+ __field(unsigned int, call)
+ __field(enum rxrpc_congest_change, change)
+ __field(rxrpc_seq_t, hard_ack)
+ __field(rxrpc_seq_t, top)
+ __field(rxrpc_seq_t, lowest_nak)
+ __field(rxrpc_serial_t, ack_serial)
+ __field_struct(struct rxrpc_ack_summary, sum)
),
TP_fast_assign(
@@ -1559,14 +1574,14 @@ TRACE_EVENT(rxrpc_reset_cwnd,
TP_ARGS(call, now),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum rxrpc_congest_mode, mode )
- __field(unsigned short, cwnd )
- __field(unsigned short, extra )
- __field(rxrpc_seq_t, hard_ack )
- __field(rxrpc_seq_t, prepared )
- __field(ktime_t, since_last_tx )
- __field(bool, has_data )
+ __field(unsigned int, call)
+ __field(enum rxrpc_congest_mode, mode)
+ __field(unsigned short, cwnd)
+ __field(unsigned short, extra)
+ __field(rxrpc_seq_t, hard_ack)
+ __field(rxrpc_seq_t, prepared)
+ __field(ktime_t, since_last_tx)
+ __field(bool, has_data)
),
TP_fast_assign(
@@ -1597,8 +1612,8 @@ TRACE_EVENT(rxrpc_disconnect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -1617,8 +1632,8 @@ TRACE_EVENT(rxrpc_improper_term,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -1637,11 +1652,11 @@ TRACE_EVENT(rxrpc_connect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned long, user_call_ID )
- __field(u32, cid )
- __field(u32, call_id )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, call)
+ __field(unsigned long, user_call_ID)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1666,10 +1681,10 @@ TRACE_EVENT(rxrpc_resend,
TP_ARGS(call, ack),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(rxrpc_seq_t, seq )
- __field(rxrpc_seq_t, transmitted )
- __field(rxrpc_serial_t, ack_serial )
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_seq_t, transmitted)
+ __field(rxrpc_serial_t, ack_serial)
),
TP_fast_assign(
@@ -1694,9 +1709,9 @@ TRACE_EVENT(rxrpc_rx_icmp,
TP_ARGS(peer, ee, srx),
TP_STRUCT__entry(
- __field(unsigned int, peer )
- __field_struct(struct sock_extended_err, ee )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, peer)
+ __field_struct(struct sock_extended_err, ee)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1723,10 +1738,10 @@ TRACE_EVENT(rxrpc_tx_fail,
TP_ARGS(debug_id, serial, ret, where),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
- __field(int, ret )
- __field(enum rxrpc_tx_point, where )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
+ __field(int, ret)
+ __field(enum rxrpc_tx_point, where)
),
TP_fast_assign(
@@ -1749,13 +1764,13 @@ TRACE_EVENT(rxrpc_call_reset,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(u32, cid )
- __field(u32, call_id )
- __field(rxrpc_serial_t, call_serial )
- __field(rxrpc_serial_t, conn_serial )
- __field(rxrpc_seq_t, tx_seq )
- __field(rxrpc_seq_t, rx_seq )
+ __field(unsigned int, debug_id)
+ __field(u32, cid)
+ __field(u32, call_id)
+ __field(rxrpc_serial_t, call_serial)
+ __field(rxrpc_serial_t, conn_serial)
+ __field(rxrpc_seq_t, tx_seq)
+ __field(rxrpc_seq_t, rx_seq)
),
TP_fast_assign(
@@ -1781,8 +1796,8 @@ TRACE_EVENT(rxrpc_notify_socket,
TP_ARGS(debug_id, serial),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
),
TP_fast_assign(
@@ -1804,8 +1819,8 @@ TRACE_EVENT(rxrpc_rx_discard_ack,
prev_pkt, call_ackr_prev),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(rxrpc_serial_t, serial )
+ __field(unsigned int, debug_id)
+ __field(rxrpc_serial_t, serial)
__field(rxrpc_seq_t, first_soft_ack)
__field(rxrpc_seq_t, call_ackr_first)
__field(rxrpc_seq_t, prev_pkt)
@@ -1837,9 +1852,9 @@ TRACE_EVENT(rxrpc_req_ack,
TP_ARGS(call_debug_id, seq, why),
TP_STRUCT__entry(
- __field(unsigned int, call_debug_id )
- __field(rxrpc_seq_t, seq )
- __field(enum rxrpc_req_ack_trace, why )
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(enum rxrpc_req_ack_trace, why)
),
TP_fast_assign(
@@ -1862,11 +1877,11 @@ TRACE_EVENT(rxrpc_txbuf,
TP_ARGS(debug_id, call_debug_id, seq, ref, what),
TP_STRUCT__entry(
- __field(unsigned int, debug_id )
- __field(unsigned int, call_debug_id )
- __field(rxrpc_seq_t, seq )
- __field(int, ref )
- __field(enum rxrpc_txbuf_trace, what )
+ __field(unsigned int, debug_id)
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(int, ref)
+ __field(enum rxrpc_txbuf_trace, what)
),
TP_fast_assign(
@@ -1892,9 +1907,9 @@ TRACE_EVENT(rxrpc_poke_call,
TP_ARGS(call, busy, what),
TP_STRUCT__entry(
- __field(unsigned int, call_debug_id )
- __field(bool, busy )
- __field(enum rxrpc_call_poke_trace, what )
+ __field(unsigned int, call_debug_id)
+ __field(bool, busy)
+ __field(enum rxrpc_call_poke_trace, what)
),
TP_fast_assign(
@@ -1915,7 +1930,7 @@ TRACE_EVENT(rxrpc_call_poked,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call_debug_id )
+ __field(unsigned int, call_debug_id)
),
TP_fast_assign(
@@ -1926,6 +1941,33 @@ TRACE_EVENT(rxrpc_call_poked,
__entry->call_debug_id)
);
+TRACE_EVENT(rxrpc_sack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ unsigned int sack, enum rxrpc_sack_trace what),
+
+ TP_ARGS(call, seq, sack, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, seq)
+ __field(unsigned int, sack)
+ __field(enum rxrpc_sack_trace, what)
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call->debug_id;
+ __entry->seq = seq;
+ __entry->sack = sack;
+ __entry->what = what;
+ ),
+
+ TP_printk("c=%08x q=%08x %s k=%x",
+ __entry->call_debug_id,
+ __entry->seq,
+ __print_symbolic(__entry->what, rxrpc_sack_traces),
+ __entry->sack)
+ );
+
#undef EM
#undef E_
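The rxrpc_sack_traces list added above relies on the header's EM()/E_() trick: the same list is expanded once to build the enum and once to build the { value, "string" } pairs that __print_symbolic() consumes. A standalone sketch of the pattern with made-up names:

#define my_sack_traces \
	EM(my_sack_advance, "ADV") \
	E_(my_sack_none,    "---")

/* First expansion: enum members */
#undef EM
#undef E_
#define EM(a, b) a,
#define E_(a, b) a
enum my_sack_trace { my_sack_traces };

/* Second expansion: symbol table for __print_symbolic() */
#undef EM
#undef E_
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
/* e.g. __print_symbolic(__entry->what, my_sack_traces) */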
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index f160d68f961d..422c1ad9484d 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -139,11 +139,15 @@ TRACE_EVENT(scmi_rx_done,
);
TRACE_EVENT(scmi_msg_dump,
- TP_PROTO(u8 protocol_id, u8 msg_id, unsigned char *tag, u16 seq,
- int status, void *buf, size_t len),
- TP_ARGS(protocol_id, msg_id, tag, seq, status, buf, len),
+ TP_PROTO(int id, u8 channel_id, u8 protocol_id, u8 msg_id,
+ unsigned char *tag, u16 seq, int status,
+ void *buf, size_t len),
+ TP_ARGS(id, channel_id, protocol_id, msg_id, tag, seq, status,
+ buf, len),
TP_STRUCT__entry(
+ __field(int, id)
+ __field(u8, channel_id)
__field(u8, protocol_id)
__field(u8, msg_id)
__array(char, tag, 5)
@@ -154,6 +158,8 @@ TRACE_EVENT(scmi_msg_dump,
),
TP_fast_assign(
+ __entry->id = id;
+ __entry->channel_id = channel_id;
__entry->protocol_id = protocol_id;
__entry->msg_id = msg_id;
strscpy(__entry->tag, tag, 5);
@@ -163,9 +169,9 @@ TRACE_EVENT(scmi_msg_dump,
memcpy(__get_dynamic_array(cmd), buf, __entry->len);
),
- TP_printk("pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
- __entry->protocol_id, __entry->tag, __entry->msg_id,
- __entry->seq, __entry->status,
+ TP_printk("id=%d ch=%02X pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
+ __entry->id, __entry->channel_id, __entry->protocol_id,
+ __entry->tag, __entry->msg_id, __entry->seq, __entry->status,
__print_hex_str(__get_dynamic_array(cmd), __entry->len))
);
#endif /* _TRACE_SCMI_H */
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 25ab1ff9423d..07e0715628ec 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -53,19 +53,21 @@ TRACE_EVENT(kfree_skb,
TRACE_EVENT(consume_skb,
- TP_PROTO(struct sk_buff *skb),
+ TP_PROTO(struct sk_buff *skb, void *location),
- TP_ARGS(skb),
+ TP_ARGS(skb, location),
TP_STRUCT__entry(
- __field( void *, skbaddr )
+ __field( void *, skbaddr)
+ __field( void *, location)
),
TP_fast_assign(
__entry->skbaddr = skb;
+ __entry->location = location;
),
- TP_printk("skbaddr=%p", __entry->skbaddr)
+ TP_printk("skbaddr=%p location=%pS", __entry->skbaddr, __entry->location)
);
TRACE_EVENT(skb_copy_datagram_iovec,
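With the extra location argument, the %pS specifier in TP_printk resolves the caller to a symbol name. A hedged sketch of how the skb free path might supply it (the actual caller change lives outside this hunk):

void example_consume_skb(struct sk_buff *skb)
{
	/* Record who handed the skb back; printed as "location=%pS". */
	trace_consume_skb(skb, __builtin_return_address(0));
	/* ...normal skb release path... */
}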
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 777ee6cbe933..03d19fc562f8 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -263,6 +263,75 @@ TRACE_EVENT(inet_sk_error_report,
__entry->error)
);
+TRACE_EVENT(sk_data_ready,
+
+ TP_PROTO(const struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __field(unsigned long, ip)
+ ),
+
+ TP_fast_assign(
+ __entry->skaddr = sk;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->ip = _RET_IP_;
+ ),
+
+ TP_printk("family=%u protocol=%u func=%ps",
+ __entry->family, __entry->protocol, (void *)__entry->ip)
+);
+
+/*
+ * sock send/recv msg length
+ */
+DECLARE_EVENT_CLASS(sock_msg_length,
+
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags),
+
+ TP_STRUCT__entry(
+ __field(void *, sk)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __field(int, ret)
+ __field(int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->ret = ret;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("sk address = %p, family = %s protocol = %s, length = %d, error = %d, flags = 0x%x",
+ __entry->sk, show_family_name(__entry->family),
+ show_inet_protocol_name(__entry->protocol),
+ !(__entry->flags & MSG_PEEK) ?
+ (__entry->ret > 0 ? __entry->ret : 0) : 0,
+ __entry->ret < 0 ? __entry->ret : 0,
+ __entry->flags)
+);
+
+DEFINE_EVENT(sock_msg_length, sock_send_length,
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags)
+);
+
+DEFINE_EVENT(sock_msg_length, sock_recv_length,
+ TP_PROTO(struct sock *sk, int ret, int flags),
+
+ TP_ARGS(sk, ret, flags)
+);
#endif /* _TRACE_SOCK_H */
/* This part must be outside protection */
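The TP_printk above folds the single return value into a length and an error: MSG_PEEK transfers no data, positive returns are lengths, negative returns are errors. Equivalent helpers, written out only to illustrate that split:

static inline int sock_trace_length(int ret, int flags)
{
	/* Peeks copy data without consuming it, so report 0 bytes. */
	return (!(flags & MSG_PEEK) && ret > 0) ? ret : 0;
}

static inline int sock_trace_error(int ret)
{
	return ret < 0 ? ret : 0;
}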
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 37604e0e5963..3ca54536f8f7 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1819,20 +1819,20 @@ TRACE_EVENT(svc_stats_latency,
#define show_svc_xprt_flags(flags) \
__print_flags(flags, "|", \
- { (1UL << XPT_BUSY), "XPT_BUSY"}, \
- { (1UL << XPT_CONN), "XPT_CONN"}, \
- { (1UL << XPT_CLOSE), "XPT_CLOSE"}, \
- { (1UL << XPT_DATA), "XPT_DATA"}, \
- { (1UL << XPT_TEMP), "XPT_TEMP"}, \
- { (1UL << XPT_DEAD), "XPT_DEAD"}, \
- { (1UL << XPT_CHNGBUF), "XPT_CHNGBUF"}, \
- { (1UL << XPT_DEFERRED), "XPT_DEFERRED"}, \
- { (1UL << XPT_OLD), "XPT_OLD"}, \
- { (1UL << XPT_LISTENER), "XPT_LISTENER"}, \
- { (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \
- { (1UL << XPT_LOCAL), "XPT_LOCAL"}, \
- { (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \
- { (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"})
+ { BIT(XPT_BUSY), "BUSY" }, \
+ { BIT(XPT_CONN), "CONN" }, \
+ { BIT(XPT_CLOSE), "CLOSE" }, \
+ { BIT(XPT_DATA), "DATA" }, \
+ { BIT(XPT_TEMP), "TEMP" }, \
+ { BIT(XPT_DEAD), "DEAD" }, \
+ { BIT(XPT_CHNGBUF), "CHNGBUF" }, \
+ { BIT(XPT_DEFERRED), "DEFERRED" }, \
+ { BIT(XPT_OLD), "OLD" }, \
+ { BIT(XPT_LISTENER), "LISTENER" }, \
+ { BIT(XPT_CACHE_AUTH), "CACHE_AUTH" }, \
+ { BIT(XPT_LOCAL), "LOCAL" }, \
+ { BIT(XPT_KILL_TEMP), "KILL_TEMP" }, \
+ { BIT(XPT_CONG_CTRL), "CONG_CTRL" })
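__print_flags() walks the { mask, "name" } pairs and joins every matching name with the given separator, so the shortened names above render as e.g. "BUSY|DATA". A sketch of the same idiom with generic names (only meaningful inside a trace header's TP_printk):

#define show_example_flags(flags) \
	__print_flags(flags, "|", \
		{ BIT(0), "BUSY" }, \
		{ BIT(3), "DATA" })
/* flags == BIT(0) | BIT(3) is rendered as "BUSY|DATA" in the trace output */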
TRACE_EVENT(svc_xprt_create_err,
TP_PROTO(
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 8f3bf1e17707..2c11181c82e0 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -4,51 +4,7 @@
#ifdef CONFIG_PERF_EVENTS
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_cpumask
-#define __get_cpumask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_sockaddr
-#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
-
-#undef __get_rel_dynamic_array
-#define __get_rel_dynamic_array(field) \
- ((void *)__entry + \
- offsetof(typeof(*__entry), __rel_loc_##field) + \
- sizeof(__entry->__rel_loc_##field) + \
- (__entry->__rel_loc_##field & 0xffff))
-
-#undef __get_rel_dynamic_array_len
-#define __get_rel_dynamic_array_len(field) \
- ((__entry->__rel_loc_##field >> 16) & 0xffff)
-
-#undef __get_rel_str
-#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
-
-#undef __get_rel_bitmask
-#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_cpumask
-#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_sockaddr
-#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+#include "stages/stage6_event_callback.h"
#undef __perf_count
#define __perf_count(c) (__count = (c))
diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h
index 66374df61ed3..c1fb1355d309 100644
--- a/include/trace/stages/stage3_trace_output.h
+++ b/include/trace/stages/stage3_trace_output.h
@@ -139,3 +139,6 @@
u64 ____val = (u64)(value); \
(u32) do_div(____val, NSEC_PER_SEC); \
})
+
+#undef __get_buf
+#define __get_buf(len) trace_seq_acquire(p, (len))
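__get_buf() hands back scratch space from the trace_seq used while rendering an event, so print helpers can format into it and return the pointer. A hedged sketch of a helper macro built on it (names are illustrative):

/* Usable inside a trace header once __get_buf() is defined as above. */
#define __print_byte_pair(a, b) \
	({ \
		char *__buf = __get_buf(8);	/* trace_seq_acquire(p, 8) */ \
		snprintf(__buf, 8, "%02x:%02x", (a), (b)); \
		__buf; \
	})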
diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h
index 49c32394b53f..919b1a4da980 100644
--- a/include/trace/stages/stage6_event_callback.h
+++ b/include/trace/stages/stage6_event_callback.h
@@ -2,6 +2,9 @@
/* Stage 6 definitions for creating trace events */
+/* Reuse some of the stage 3 macros */
+#include "stage3_trace_output.h"
+
#undef __entry
#define __entry entry
diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h
index 8795429f388b..bcb960d16fc0 100644
--- a/include/trace/stages/stage7_class_define.h
+++ b/include/trace/stages/stage7_class_define.h
@@ -23,6 +23,7 @@
#undef __get_rel_sockaddr
#undef __print_array
#undef __print_hex_dump
+#undef __get_buf
/*
* The below is not executed in the kernel. It is only what is
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4038abe8505a..973af6d06626 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -832,6 +832,10 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
/* Subquery id: Query GPU stable pstate memory clock */
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
+ /* Subquery id: Query GPU peak pstate shader clock */
+ #define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK 0xa
+ /* Subquery id: Query GPU peak pstate memory clock */
+ #define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK 0xb
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
@@ -1049,7 +1053,8 @@ struct drm_amdgpu_info_device {
__u32 enabled_rb_pipes_mask;
__u32 num_rb_pipes;
__u32 num_hw_gfx_contexts;
- __u32 _pad;
+ /* PCIe version (the smaller of the GPU and the CPU/motherboard) */
+ __u32 pcie_gen;
__u64 ids_flags;
/** Starting virtual address for UMDs. */
__u64 virtual_address_offset;
@@ -1096,7 +1101,8 @@ struct drm_amdgpu_info_device {
__u32 gs_prim_buffer_depth;
/* max gs wavefront per vgt*/
__u32 max_gs_waves_per_vgt;
- __u32 _pad1;
+ /* PCIe number of lanes (the smaller of the GPU and the CPU/motherboard) */
+ __u32 pcie_num_lanes;
/* always on cu bitmap */
__u32 cu_ao_bitmap[4][4];
/** Starting high virtual address for UMDs. */
@@ -1107,6 +1113,8 @@ struct drm_amdgpu_info_device {
__u32 pa_sc_tile_steering_override;
/* disabled TCCs */
__u64 tcc_disabled_mask;
+ __u64 min_engine_clock;
+ __u64 min_memory_clock;
};
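Userspace reaches the repurposed pad fields through the DEV_INFO query of the amdgpu info ioctl. A hedged sketch, assuming libdrm's drmIoctl() and the installed uapi header:

#include <stdint.h>
#include <xf86drm.h>		/* drmIoctl(), assumed from libdrm */
#include <drm/amdgpu_drm.h>

static int query_pcie_caps(int fd, uint32_t *gen, uint32_t *lanes)
{
	struct drm_amdgpu_info_device dev = {0};
	struct drm_amdgpu_info req = {0};

	req.return_pointer = (uintptr_t)&dev;
	req.return_size = sizeof(dev);
	req.query = AMDGPU_INFO_DEV_INFO;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_INFO, &req))
		return -1;

	*gen = dev.pcie_gen;		/* was the _pad field */
	*lanes = dev.pcie_num_lanes;	/* was the _pad1 field */
	return 0;
}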
struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index bc056f2d537d..de703c6be969 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -88,6 +88,18 @@ extern "C" {
*
* The authoritative list of format modifier codes is found in
* `include/uapi/drm/drm_fourcc.h`
+ *
+ * Open Source User Waiver
+ * -----------------------
+ *
+ * Because this is the authoritative source for pixel formats and modifiers
+ * referenced by GL, Vulkan extensions and other standards and hence used both
+ * by open source and closed source driver stacks, the usual requirement for an
+ * upstream in-kernel or open source userspace user does not apply.
+ *
+ * To ensure, as much as feasible, compatibility across stacks and avoid
+ * confusion with incompatible enumerations, stakeholders for all relevant driver
+ * stacks should approve additions.
*/
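For reference, the fourcc_code() macro directly below packs one byte per character, least-significant byte first; a worked instance:

/* fourcc_code('X', 'R', '2', '4')
 *   = 0x58 | (0x52 << 8) | (0x32 << 16) | (0x34 << 24)
 *   = 0x34325258, i.e. DRM_FORMAT_XRGB8888
 */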
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/drm/habanalabs_accel.h
index 3b995e841eb8..331567ec9e79 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/drm/habanalabs_accel.h
@@ -789,6 +789,7 @@ enum hl_server_type {
* HL_INFO_ENGINE_STATUS - Retrieve the status of all the h/w engines in the asic.
* HL_INFO_PAGE_FAULT_EVENT - Retrieve parameters of captured page fault.
* HL_INFO_USER_MAPPINGS - Retrieve user mappings, captured after page fault event.
+ * HL_INFO_FW_GENERIC_REQ - Send generic request to FW.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -822,6 +823,7 @@ enum hl_server_type {
#define HL_INFO_ENGINE_STATUS 32
#define HL_INFO_PAGE_FAULT_EVENT 33
#define HL_INFO_USER_MAPPINGS 34
+#define HL_INFO_FW_GENERIC_REQ 35
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@@ -1258,6 +1260,7 @@ enum gaudi_dcores {
* @sec_attest_nonce: Nonce number used for attestation report.
* @array_size: Number of array members copied to user buffer.
* Relevant for HL_INFO_USER_MAPPINGS info ioctl.
+ * @fw_sub_opcode: sub-opcode of a generic firmware request.
* @pad: Padding to 64 bit.
*/
struct hl_info_args {
@@ -1274,6 +1277,7 @@ struct hl_info_args {
__u32 user_buffer_actual_size;
__u32 sec_attest_nonce;
__u32 array_size;
+ __u32 fw_sub_opcode;
};
__u32 pad;
@@ -1474,6 +1478,14 @@ struct hl_cs_chunk {
*/
#define HL_CS_FLAGS_ENGINE_CORE_COMMAND 0x4000
+/*
+ * The HBW PCI flush is merged into the existing CS ioctls and is used to
+ * flush all HBW PCI writes.
+ * This is a blocking operation, and for this reason the user shall not use
+ * the return sequence number (which will be invalid anyway).
+ */
+#define HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES 0x8000
+
#define HL_CS_STATUS_SUCCESS 0
#define HL_MAX_JOBS_PER_CS 512
@@ -1851,15 +1863,24 @@ struct hl_mem_in {
/**
* structure for exporting DMABUF object (used with
* the HL_MEM_OP_EXPORT_DMABUF_FD op)
- * @handle: handle returned from HL_MEM_OP_ALLOC.
- * in Gaudi, where we don't have MMU for the device memory, the
- * driver expects a physical address (instead of a handle) in the
- * device memory space.
- * @mem_size: size of memory allocation. Relevant only for GAUDI
+ * @addr: for Gaudi1, the driver expects a physical address
+ * inside the device's DRAM, because Gaudi1 has no MMU
+ * covering the device's DRAM.
+ * For all other ASICs, the driver expects a device
+ * virtual address that represents the start address of
+ * a mapped DRAM memory area inside the device.
+ * The address must be the same as was received from the
+ * driver during a previous HL_MEM_OP_MAP operation.
+ * @mem_size: size of memory to export.
+ * @offset: for Gaudi1, this value must be 0. For all other ASICs,
+ * the driver expects an offset inside the memory area
+ * described by @addr. The offset is the start of the
+ * region that the exported dma-buf object describes.
*/
struct {
- __u64 handle;
+ __u64 addr;
__u64 mem_size;
+ __u64 offset;
} export_dmabuf_fd;
};
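A hedged userspace sketch of the reworked export: the caller now passes a device virtual address obtained from a prior HL_MEM_OP_MAP (or a DRAM physical address on Gaudi1) plus an offset, instead of an allocation handle. The argument and output field names outside the export_dmabuf_fd block follow the driver's usual memory-ioctl layout and are assumptions here:

#include <string.h>
#include <sys/ioctl.h>

static int export_region(int fd, __u64 device_va, __u64 size)
{
	union hl_mem_args args;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_EXPORT_DMABUF_FD;
	args.in.export_dmabuf_fd.addr = device_va;	/* from a previous HL_MEM_OP_MAP */
	args.in.export_dmabuf_fd.mem_size = size;
	args.in.export_dmabuf_fd.offset = 0;		/* must be 0 on Gaudi1 */

	if (ioctl(fd, HL_IOCTL_MEMORY, &args))
		return -1;
	return (int)args.out.fd;			/* assumed output field */
}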
diff --git a/include/uapi/drm/i810_drm.h b/include/uapi/drm/i810_drm.h
deleted file mode 100644
index d285d5e72e6a..000000000000
--- a/include/uapi/drm/i810_drm.h
+++ /dev/null
@@ -1,292 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _I810_DRM_H_
-#define _I810_DRM_H_
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* WARNING: These defines must be the same as what the Xserver uses.
- * if you change them, you must change the defines in the Xserver.
- */
-
-#ifndef _I810_DEFINES_
-#define _I810_DEFINES_
-
-#define I810_DMA_BUF_ORDER 12
-#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
-#define I810_DMA_BUF_NR 256
-#define I810_NR_SAREA_CLIPRECTS 8
-
-/* Each region is a minimum of 64k, and there are at most 64 of them.
- */
-#define I810_NR_TEX_REGIONS 64
-#define I810_LOG_MIN_TEX_REGION_SIZE 16
-#endif
-
-#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
-#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
-#define I810_UPLOAD_CTX 0x4
-#define I810_UPLOAD_BUFFERS 0x8
-#define I810_UPLOAD_TEX0 0x10
-#define I810_UPLOAD_TEX1 0x20
-#define I810_UPLOAD_CLIPRECTS 0x40
-
-/* Indices into buf.Setup where various bits of state are mirrored per
- * context and per buffer. These can be fired at the card as a unit,
- * or in a piecewise fashion as required.
- */
-
-/* Destbuffer state
- * - backbuffer linear offset and pitch -- invarient in the current dri
- * - zbuffer linear offset and pitch -- also invarient
- * - drawing origin in back and depth buffers.
- *
- * Keep the depth/back buffer state here to accommodate private buffers
- * in the future.
- */
-#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
-#define I810_DESTREG_DI1 1
-#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
-#define I810_DESTREG_DV1 3
-#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */
-#define I810_DESTREG_DR1 5
-#define I810_DESTREG_DR2 6
-#define I810_DESTREG_DR3 7
-#define I810_DESTREG_DR4 8
-#define I810_DEST_SETUP_SIZE 10
-
-/* Context state
- */
-#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
-#define I810_CTXREG_CF1 1
-#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
-#define I810_CTXREG_ST1 3
-#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
-#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
-#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
-#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
-#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
-#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
-#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
-#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
-#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */
-#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */
-#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */
-#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */
-#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
-#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invarient! */
-#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */
-#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */
-#define I810_CTX_SETUP_SIZE 20
-
-/* Texture state (per tex unit)
- */
-#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */
-#define I810_TEXREG_MI1 1
-#define I810_TEXREG_MI2 2
-#define I810_TEXREG_MI3 3
-#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */
-#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */
-#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */
-#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
-#define I810_TEX_SETUP_SIZE 8
-
-/* Flags for clear ioctl
- */
-#define I810_FRONT 0x1
-#define I810_BACK 0x2
-#define I810_DEPTH 0x4
-
-typedef enum _drm_i810_init_func {
- I810_INIT_DMA = 0x01,
- I810_CLEANUP_DMA = 0x02,
- I810_INIT_DMA_1_4 = 0x03
-} drm_i810_init_func_t;
-
-/* This is the init structure after v1.2 */
-typedef struct _drm_i810_init {
- drm_i810_init_func_t func;
- unsigned int mmio_offset;
- unsigned int buffers_offset;
- int sarea_priv_offset;
- unsigned int ring_start;
- unsigned int ring_end;
- unsigned int ring_size;
- unsigned int front_offset;
- unsigned int back_offset;
- unsigned int depth_offset;
- unsigned int overlay_offset;
- unsigned int overlay_physical;
- unsigned int w;
- unsigned int h;
- unsigned int pitch;
- unsigned int pitch_bits;
-} drm_i810_init_t;
-
-/* This is the init structure prior to v1.2 */
-typedef struct _drm_i810_pre12_init {
- drm_i810_init_func_t func;
- unsigned int mmio_offset;
- unsigned int buffers_offset;
- int sarea_priv_offset;
- unsigned int ring_start;
- unsigned int ring_end;
- unsigned int ring_size;
- unsigned int front_offset;
- unsigned int back_offset;
- unsigned int depth_offset;
- unsigned int w;
- unsigned int h;
- unsigned int pitch;
- unsigned int pitch_bits;
-} drm_i810_pre12_init_t;
-
-/* Warning: If you change the SAREA structure you must change the Xserver
- * structure as well */
-
-typedef struct _drm_i810_tex_region {
- unsigned char next, prev; /* indices to form a circular LRU */
- unsigned char in_use; /* owned by a client, or free? */
- int age; /* tracked by clients to update local LRU's */
-} drm_i810_tex_region_t;
-
-typedef struct _drm_i810_sarea {
- unsigned int ContextState[I810_CTX_SETUP_SIZE];
- unsigned int BufferState[I810_DEST_SETUP_SIZE];
- unsigned int TexState[2][I810_TEX_SETUP_SIZE];
- unsigned int dirty;
-
- unsigned int nbox;
- struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
-
- /* Maintain an LRU of contiguous regions of texture space. If
- * you think you own a region of texture memory, and it has an
- * age different to the one you set, then you are mistaken and
- * it has been stolen by another client. If global texAge
- * hasn't changed, there is no need to walk the list.
- *
- * These regions can be used as a proxy for the fine-grained
- * texture information of other clients - by maintaining them
- * in the same lru which is used to age their own textures,
- * clients have an approximate lru for the whole of global
- * texture space, and can make informed decisions as to which
- * areas to kick out. There is no need to choose whether to
- * kick out your own texture or someone else's - simply eject
- * them all in LRU order.
- */
-
- drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS + 1];
- /* Last elt is sentinal */
- int texAge; /* last time texture was uploaded */
- int last_enqueue; /* last time a buffer was enqueued */
- int last_dispatch; /* age of the most recently dispatched buffer */
- int last_quiescent; /* */
- int ctxOwner; /* last context to upload state */
-
- int vertex_prim;
-
- int pf_enabled; /* is pageflipping allowed? */
- int pf_active;
- int pf_current_page; /* which buffer is being displayed? */
-} drm_i810_sarea_t;
-
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (xf86drmMga.h)
- */
-
-/* i810 specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
- */
-#define DRM_I810_INIT 0x00
-#define DRM_I810_VERTEX 0x01
-#define DRM_I810_CLEAR 0x02
-#define DRM_I810_FLUSH 0x03
-#define DRM_I810_GETAGE 0x04
-#define DRM_I810_GETBUF 0x05
-#define DRM_I810_SWAP 0x06
-#define DRM_I810_COPY 0x07
-#define DRM_I810_DOCOPY 0x08
-#define DRM_I810_OV0INFO 0x09
-#define DRM_I810_FSTATUS 0x0a
-#define DRM_I810_OV0FLIP 0x0b
-#define DRM_I810_MC 0x0c
-#define DRM_I810_RSTATUS 0x0d
-#define DRM_I810_FLIP 0x0e
-
-#define DRM_IOCTL_I810_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I810_INIT, drm_i810_init_t)
-#define DRM_IOCTL_I810_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I810_VERTEX, drm_i810_vertex_t)
-#define DRM_IOCTL_I810_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I810_CLEAR, drm_i810_clear_t)
-#define DRM_IOCTL_I810_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_I810_FLUSH)
-#define DRM_IOCTL_I810_GETAGE DRM_IO( DRM_COMMAND_BASE + DRM_I810_GETAGE)
-#define DRM_IOCTL_I810_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I810_GETBUF, drm_i810_dma_t)
-#define DRM_IOCTL_I810_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_I810_SWAP)
-#define DRM_IOCTL_I810_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I810_COPY, drm_i810_copy_t)
-#define DRM_IOCTL_I810_DOCOPY DRM_IO( DRM_COMMAND_BASE + DRM_I810_DOCOPY)
-#define DRM_IOCTL_I810_OV0INFO DRM_IOR( DRM_COMMAND_BASE + DRM_I810_OV0INFO, drm_i810_overlay_t)
-#define DRM_IOCTL_I810_FSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FSTATUS)
-#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_OV0FLIP)
-#define DRM_IOCTL_I810_MC DRM_IOW( DRM_COMMAND_BASE + DRM_I810_MC, drm_i810_mc_t)
-#define DRM_IOCTL_I810_RSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_RSTATUS)
-#define DRM_IOCTL_I810_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FLIP)
-
-typedef struct _drm_i810_clear {
- int clear_color;
- int clear_depth;
- int flags;
-} drm_i810_clear_t;
-
-/* These may be placeholders if we have more cliprects than
- * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
- * false, indicating that the buffer will be dispatched again with a
- * new set of cliprects.
- */
-typedef struct _drm_i810_vertex {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- int discard; /* client is finished with the buffer? */
-} drm_i810_vertex_t;
-
-typedef struct _drm_i810_copy_t {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- void *address; /* Address to copy from */
-} drm_i810_copy_t;
-
-#define PR_TRIANGLES (0x0<<18)
-#define PR_TRISTRIP_0 (0x1<<18)
-#define PR_TRISTRIP_1 (0x2<<18)
-#define PR_TRIFAN (0x3<<18)
-#define PR_POLYGON (0x4<<18)
-#define PR_LINES (0x5<<18)
-#define PR_LINESTRIP (0x6<<18)
-#define PR_RECTS (0x7<<18)
-#define PR_MASK (0x7<<18)
-
-typedef struct drm_i810_dma {
- void *virtual;
- int request_idx;
- int request_size;
- int granted;
-} drm_i810_dma_t;
-
-typedef struct _drm_i810_overlay_t {
- unsigned int offset; /* Address of the Overlay Regs */
- unsigned int physical;
-} drm_i810_overlay_t;
-
-typedef struct _drm_i810_mc {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- int num_blocks; /* number of GFXBlocks */
- int *length; /* List of lengths for GFXBlocks (FUTURE) */
- unsigned int last_render; /* Last Render Request */
-} drm_i810_mc_t;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* _I810_DRM_H_ */
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
new file mode 100644
index 000000000000..839820aed87e
--- /dev/null
+++ b/include/uapi/drm/ivpu_accel.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __UAPI_IVPU_DRM_H__
+#define __UAPI_IVPU_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_IVPU_DRIVER_MAJOR 1
+#define DRM_IVPU_DRIVER_MINOR 0
+
+#define DRM_IVPU_GET_PARAM 0x00
+#define DRM_IVPU_SET_PARAM 0x01
+#define DRM_IVPU_BO_CREATE 0x02
+#define DRM_IVPU_BO_INFO 0x03
+#define DRM_IVPU_SUBMIT 0x05
+#define DRM_IVPU_BO_WAIT 0x06
+
+#define DRM_IOCTL_IVPU_GET_PARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)
+
+#define DRM_IOCTL_IVPU_SET_PARAM \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)
+
+#define DRM_IOCTL_IVPU_BO_CREATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)
+
+#define DRM_IOCTL_IVPU_BO_INFO \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)
+
+#define DRM_IOCTL_IVPU_SUBMIT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)
+
+#define DRM_IOCTL_IVPU_BO_WAIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)
+
+/**
+ * DOC: contexts
+ *
+ * Each VPU context has a private virtual address space, its own job queues and
+ * a priority. A context is identified by a unique ID and is created on open().
+ */
+
+#define DRM_IVPU_PARAM_DEVICE_ID 0
+#define DRM_IVPU_PARAM_DEVICE_REVISION 1
+#define DRM_IVPU_PARAM_PLATFORM_TYPE 2
+#define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3
+#define DRM_IVPU_PARAM_NUM_CONTEXTS 4
+#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
+#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6
+#define DRM_IVPU_PARAM_CONTEXT_ID 7
+#define DRM_IVPU_PARAM_FW_API_VERSION 8
+#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9
+#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID 10
+#define DRM_IVPU_PARAM_TILE_CONFIG 11
+#define DRM_IVPU_PARAM_SKU 12
+
+#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
+
+#define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0
+#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
+#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
+#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
+
+/**
+ * struct drm_ivpu_param - Get/Set VPU parameters
+ */
+struct drm_ivpu_param {
+ /**
+ * @param:
+ *
+ * Supported params:
+ *
+ * %DRM_IVPU_PARAM_DEVICE_ID:
+ * PCI Device ID of the VPU device (read-only)
+ *
+ * %DRM_IVPU_PARAM_DEVICE_REVISION:
+ * VPU device revision (read-only)
+ *
+ * %DRM_IVPU_PARAM_PLATFORM_TYPE:
+ * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or a device-specific
+ * platform type when executing on a simulator or emulator (read-only)
+ *
+ * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
+ * Current PLL frequency (read-only)
+ *
+ * %DRM_IVPU_PARAM_NUM_CONTEXTS:
+ * Maximum number of simultaneously existing contexts (read-only)
+ *
+ * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
+ * Lowest VPU virtual address available in the current context (read-only)
+ *
+ * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
+ * Value of current context scheduling priority (read-write).
+ * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
+ *
+ * %DRM_IVPU_PARAM_CONTEXT_ID:
+ * Current context ID, always greater than 0 (read-only)
+ *
+ * %DRM_IVPU_PARAM_FW_API_VERSION:
+ * Firmware API version array (read-only)
+ *
+ * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
+ * Heartbeat value from an engine (read-only).
+ * Engine ID (e.g. DRM_IVPU_ENGINE_COMPUTE) is given via @index.
+ *
+ * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
+ * Device-unique inference ID (read-only)
+ *
+ * %DRM_IVPU_PARAM_TILE_CONFIG:
+ * VPU tile configuration (read-only)
+ *
+ * %DRM_IVPU_PARAM_SKU:
+ * VPU SKU ID (read-only)
+ *
+ */
+ __u32 param;
+
+ /** @index: Index for params that have multiple instances */
+ __u32 index;
+
+ /** @value: Param value */
+ __u64 value;
+};
+
+#define DRM_IVPU_BO_HIGH_MEM 0x00000001
+#define DRM_IVPU_BO_MAPPABLE 0x00000002
+
+#define DRM_IVPU_BO_CACHED 0x00000000
+#define DRM_IVPU_BO_UNCACHED 0x00010000
+#define DRM_IVPU_BO_WC 0x00020000
+#define DRM_IVPU_BO_CACHE_MASK 0x00030000
+
+#define DRM_IVPU_BO_FLAGS \
+ (DRM_IVPU_BO_HIGH_MEM | \
+ DRM_IVPU_BO_MAPPABLE | \
+ DRM_IVPU_BO_CACHE_MASK)
+
+/**
+ * struct drm_ivpu_bo_create - Create BO backed by SHMEM
+ *
+ * Create GEM buffer object allocated in SHMEM memory.
+ */
+struct drm_ivpu_bo_create {
+ /** @size: The size in bytes of the allocated memory */
+ __u64 size;
+
+ /**
+ * @flags:
+ *
+ * Supported flags:
+ *
+ * %DRM_IVPU_BO_HIGH_MEM:
+ *
+ * Allocate VPU address from >4GB range.
+ * A buffer object with a VPU address >4GB can always be accessed by the
+ * VPU DMA engine, but some HW generations may not be able to access
+ * this memory from the firmware running on the VPU management processor.
+ * Suitable for input, output and some scratch buffers.
+ *
+ * %DRM_IVPU_BO_MAPPABLE:
+ *
+ * Buffer object can be mapped using mmap().
+ *
+ * %DRM_IVPU_BO_CACHED:
+ *
+ * Allocated BO will be cached on host side (WB) and snooped on the VPU side.
+ * This is the default caching mode.
+ *
+ * %DRM_IVPU_BO_UNCACHED:
+ *
+ * Allocated BO will not be cached on host side nor snooped on the VPU side.
+ *
+ * %DRM_IVPU_BO_WC:
+ *
+ * Allocated BO will use write combining buffer for writes but reads will be
+ * uncached.
+ */
+ __u32 flags;
+
+ /** @handle: Returned GEM object handle */
+ __u32 handle;
+
+ /** @vpu_addr: Returned VPU virtual address */
+ __u64 vpu_addr;
+};
+
+/**
+ * struct drm_ivpu_bo_info - Query buffer object info
+ */
+struct drm_ivpu_bo_info {
+ /** @handle: Handle of the queried BO */
+ __u32 handle;
+
+ /** @flags: Returned flags used to create the BO */
+ __u32 flags;
+
+ /** @vpu_addr: Returned VPU virtual address */
+ __u64 vpu_addr;
+
+ /**
+ * @mmap_offset:
+ *
+ * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
+ */
+ __u64 mmap_offset;
+
+ /** @size: Returned GEM object size, aligned to PAGE_SIZE */
+ __u64 size;
+};
+
+/* drm_ivpu_submit engines */
+#define DRM_IVPU_ENGINE_COMPUTE 0
+#define DRM_IVPU_ENGINE_COPY 1
+
+/**
+ * struct drm_ivpu_submit - Submit commands to the VPU
+ *
+ * Execute a single command buffer on a given VPU engine.
+ * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
+ *
+ * User space may wait on job completion using %DRM_IVPU_BO_WAIT ioctl.
+ */
+struct drm_ivpu_submit {
+ /**
+ * @buffers_ptr:
+ *
+ * A pointer to an u32 array of GEM handles of the BOs required for this job.
+ * The number of elements in the array must be equal to the value given by @buffer_count.
+ *
+ * The first BO is the command buffer. The rest of the array has to contain
+ * all BOs referenced from the command buffer.
+ */
+ __u64 buffers_ptr;
+
+ /** @buffer_count: Number of elements in the @buffers_ptr array */
+ __u32 buffer_count;
+
+ /**
+ * @engine: Select the engine this job should be executed on
+ *
+ * %DRM_IVPU_ENGINE_COMPUTE:
+ *
+ * Performs Deep Learning Neural Compute Inference Operations
+ *
+ * %DRM_IVPU_ENGINE_COPY:
+ *
+ * Performs memory copy operations to/from system memory allocated for VPU
+ */
+ __u32 engine;
+
+ /** @flags: Reserved for future use - must be zero */
+ __u32 flags;
+
+ /**
+ * @commands_offset:
+ *
+ * Offset inside the first buffer in @buffers_ptr containing commands
+ * to be executed. The offset has to be 8-byte aligned.
+ */
+ __u32 commands_offset;
+};
+
+/* drm_ivpu_bo_wait job status codes */
+#define DRM_IVPU_JOB_STATUS_SUCCESS 0
+
+/**
+ * struct drm_ivpu_bo_wait - Wait for BO to become inactive
+ *
+ * Blocks until a given buffer object becomes inactive.
+ * With @timeout_ns set to 0 the ioctl returns immediately.
+ */
+struct drm_ivpu_bo_wait {
+ /** @handle: Handle to the buffer object to be waited on */
+ __u32 handle;
+
+ /** @flags: Reserved for future use - must be zero */
+ __u32 flags;
+
+ /** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
+ __s64 timeout_ns;
+
+ /**
+ * @job_status:
+ *
+ * Job status code which is updated after the job is completed.
+ * %DRM_IVPU_JOB_STATUS_SUCCESS on success or a device specific error otherwise.
+ * Valid only if @handle points to a command buffer.
+ */
+ __u32 job_status;
+
+ /** @pad: Padding - must be zero */
+ __u32 pad;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __UAPI_IVPU_DRM_H__ */
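
A minimal user-space sketch of the ivpu UAPI added above: query a read-only parameter, then allocate and map a buffer object. This is an illustration only, not part of the patch; the /dev/accel/accel0 device path and the <drm/ivpu_accel.h> install location are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/ivpu_accel.h>

int main(void)
{
	int fd = open("/dev/accel/accel0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	/* Read-only parameter query: PCI device ID of the VPU. */
	struct drm_ivpu_param param = { .param = DRM_IVPU_PARAM_DEVICE_ID };
	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &param) == 0)
		printf("VPU device id: 0x%llx\n", (unsigned long long)param.value);

	/* Allocate a small cached, CPU-mappable buffer object. */
	struct drm_ivpu_bo_create create = {
		.size = 4096,
		.flags = DRM_IVPU_BO_MAPPABLE | DRM_IVPU_BO_CACHED,
	};
	if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &create) == 0) {
		/* Fetch the mmap offset and map the BO into the process. */
		struct drm_ivpu_bo_info info = { .handle = create.handle };
		if (ioctl(fd, DRM_IOCTL_IVPU_BO_INFO, &info) == 0 && info.mmap_offset) {
			void *map = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
					 MAP_SHARED, fd, info.mmap_offset);
			if (map != MAP_FAILED)
				munmap(map, info.size);
		}
	}

	close(fd);
	return 0;
}
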
diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h
deleted file mode 100644
index bb31567e66c0..000000000000
--- a/include/uapi/drm/mga_drm.h
+++ /dev/null
@@ -1,429 +0,0 @@
-/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
- * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keith@tungstengraphics.com>
- *
- * Rewritten by:
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#ifndef __MGA_DRM_H__
-#define __MGA_DRM_H__
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (mga_sarea.h)
- */
-
-#ifndef __MGA_SAREA_DEFINES__
-#define __MGA_SAREA_DEFINES__
-
-/* WARP pipe flags
- */
-#define MGA_F 0x1 /* fog */
-#define MGA_A 0x2 /* alpha */
-#define MGA_S 0x4 /* specular */
-#define MGA_T2 0x8 /* multitexture */
-
-#define MGA_WARP_TGZ 0
-#define MGA_WARP_TGZF (MGA_F)
-#define MGA_WARP_TGZA (MGA_A)
-#define MGA_WARP_TGZAF (MGA_F|MGA_A)
-#define MGA_WARP_TGZS (MGA_S)
-#define MGA_WARP_TGZSF (MGA_S|MGA_F)
-#define MGA_WARP_TGZSA (MGA_S|MGA_A)
-#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
-#define MGA_WARP_T2GZ (MGA_T2)
-#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
-#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
-#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
-#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
-#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
-#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
-#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
-
-#define MGA_MAX_G200_PIPES 8 /* no multitex */
-#define MGA_MAX_G400_PIPES 16
-#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
-#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
-
-#define MGA_CARD_TYPE_G200 1
-#define MGA_CARD_TYPE_G400 2
-#define MGA_CARD_TYPE_G450 3 /* not currently used */
-#define MGA_CARD_TYPE_G550 4
-
-#define MGA_FRONT 0x1
-#define MGA_BACK 0x2
-#define MGA_DEPTH 0x4
-
-/* What needs to be changed for the current vertex dma buffer?
- */
-#define MGA_UPLOAD_CONTEXT 0x1
-#define MGA_UPLOAD_TEX0 0x2
-#define MGA_UPLOAD_TEX1 0x4
-#define MGA_UPLOAD_PIPE 0x8
-#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
-#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
-#define MGA_UPLOAD_2D 0x40
-#define MGA_WAIT_AGE 0x80 /* handled client-side */
-#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
-#if 0
-#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
- quiescent */
-#endif
-
-/* 32 buffers of 64k each, total 2 meg.
- */
-#define MGA_BUFFER_SIZE (1 << 16)
-#define MGA_NUM_BUFFERS 128
-
-/* Keep these small for testing.
- */
-#define MGA_NR_SAREA_CLIPRECTS 8
-
-/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
- * regions, subject to a minimum region size of (1<<16) == 64k.
- *
- * Clients may subdivide regions internally, but when sharing between
- * clients, the region size is the minimum granularity.
- */
-
-#define MGA_CARD_HEAP 0
-#define MGA_AGP_HEAP 1
-#define MGA_NR_TEX_HEAPS 2
-#define MGA_NR_TEX_REGIONS 16
-#define MGA_LOG_MIN_TEX_REGION_SIZE 16
-
-#define DRM_MGA_IDLE_RETRY 2048
-
-#endif /* __MGA_SAREA_DEFINES__ */
-
-/* Setup registers for 3D context
- */
-typedef struct {
- unsigned int dstorg;
- unsigned int maccess;
- unsigned int plnwt;
- unsigned int dwgctl;
- unsigned int alphactrl;
- unsigned int fogcolor;
- unsigned int wflag;
- unsigned int tdualstage0;
- unsigned int tdualstage1;
- unsigned int fcol;
- unsigned int stencil;
- unsigned int stencilctl;
-} drm_mga_context_regs_t;
-
-/* Setup registers for 2D, X server
- */
-typedef struct {
- unsigned int pitch;
-} drm_mga_server_regs_t;
-
-/* Setup registers for each texture unit
- */
-typedef struct {
- unsigned int texctl;
- unsigned int texctl2;
- unsigned int texfilter;
- unsigned int texbordercol;
- unsigned int texorg;
- unsigned int texwidth;
- unsigned int texheight;
- unsigned int texorg1;
- unsigned int texorg2;
- unsigned int texorg3;
- unsigned int texorg4;
-} drm_mga_texture_regs_t;
-
-/* General aging mechanism
- */
-typedef struct {
- unsigned int head; /* Position of head pointer */
- unsigned int wrap; /* Primary DMA wrap count */
-} drm_mga_age_t;
-
-typedef struct _drm_mga_sarea {
- /* The channel for communication of state information to the kernel
- * on firing a vertex dma buffer.
- */
- drm_mga_context_regs_t context_state;
- drm_mga_server_regs_t server_state;
- drm_mga_texture_regs_t tex_state[2];
- unsigned int warp_pipe;
- unsigned int dirty;
- unsigned int vertsize;
-
- /* The current cliprects, or a subset thereof.
- */
- struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
- unsigned int nbox;
-
- /* Information about the most recently used 3d drawable. The
- * client fills in the req_* fields, the server fills in the
- * exported_ fields and puts the cliprects into boxes, above.
- *
- * The client clears the exported_drawable field before
- * clobbering the boxes data.
- */
- unsigned int req_drawable; /* the X drawable id */
- unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
-
- unsigned int exported_drawable;
- unsigned int exported_index;
- unsigned int exported_stamp;
- unsigned int exported_buffers;
- unsigned int exported_nfront;
- unsigned int exported_nback;
- int exported_back_x, exported_front_x, exported_w;
- int exported_back_y, exported_front_y, exported_h;
- struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
-
- /* Counters for aging textures and for client-side throttling.
- */
- unsigned int status[4];
- unsigned int last_wrap;
-
- drm_mga_age_t last_frame;
- unsigned int last_enqueue; /* last time a buffer was enqueued */
- unsigned int last_dispatch; /* age of the most recently dispatched buffer */
- unsigned int last_quiescent; /* */
-
- /* LRU lists for texture memory in agp space and on the card.
- */
- struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
- unsigned int texAge[MGA_NR_TEX_HEAPS];
-
- /* Mechanism to validate card state.
- */
- int ctxOwner;
-} drm_mga_sarea_t;
-
-/* MGA specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
- */
-#define DRM_MGA_INIT 0x00
-#define DRM_MGA_FLUSH 0x01
-#define DRM_MGA_RESET 0x02
-#define DRM_MGA_SWAP 0x03
-#define DRM_MGA_CLEAR 0x04
-#define DRM_MGA_VERTEX 0x05
-#define DRM_MGA_INDICES 0x06
-#define DRM_MGA_ILOAD 0x07
-#define DRM_MGA_BLIT 0x08
-#define DRM_MGA_GETPARAM 0x09
-
-/* 3.2:
- * ioctls for operating on fences.
- */
-#define DRM_MGA_SET_FENCE 0x0a
-#define DRM_MGA_WAIT_FENCE 0x0b
-#define DRM_MGA_DMA_BOOTSTRAP 0x0c
-
-#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
-#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
-#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
-#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
-#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
-#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
-#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
-#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
-#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
-#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
-#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, __u32)
-#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, __u32)
-#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
-
-typedef struct _drm_mga_warp_index {
- int installed;
- unsigned long phys_addr;
- int size;
-} drm_mga_warp_index_t;
-
-typedef struct drm_mga_init {
- enum {
- MGA_INIT_DMA = 0x01,
- MGA_CLEANUP_DMA = 0x02
- } func;
-
- unsigned long sarea_priv_offset;
-
- __struct_group(/* no tag */, always32bit, /* no attrs */,
- int chipset;
- int sgram;
-
- unsigned int maccess;
-
- unsigned int fb_cpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
-
- unsigned int depth_cpp;
- unsigned int depth_offset, depth_pitch;
-
- unsigned int texture_offset[MGA_NR_TEX_HEAPS];
- unsigned int texture_size[MGA_NR_TEX_HEAPS];
- );
-
- unsigned long fb_offset;
- unsigned long mmio_offset;
- unsigned long status_offset;
- unsigned long warp_offset;
- unsigned long primary_offset;
- unsigned long buffers_offset;
-} drm_mga_init_t;
-
-typedef struct drm_mga_dma_bootstrap {
- /**
- * \name AGP texture region
- *
- * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
- * be filled in with the actual AGP texture settings.
- *
- * \warning
- * If these fields are non-zero, but dma_mga_dma_bootstrap::agp_mode
- * is zero, it means that PCI memory (most likely through the use of
- * an IOMMU) is being used for "AGP" textures.
- */
- /*@{ */
- unsigned long texture_handle; /**< Handle used to map AGP textures. */
- __u32 texture_size; /**< Size of the AGP texture region. */
- /*@} */
-
- /**
- * Requested size of the primary DMA region.
- *
- * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
- * filled in with the actual AGP mode. If AGP was not available
- */
- __u32 primary_size;
-
- /**
- * Requested number of secondary DMA buffers.
- *
- * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
- * filled in with the actual number of secondary DMA buffers
- * allocated. Particularly when PCI DMA is used, this may be
- * (subtantially) less than the number requested.
- */
- __u32 secondary_bin_count;
-
- /**
- * Requested size of each secondary DMA buffer.
- *
- * While the kernel \b is free to reduce
- * dma_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
- * to reduce dma_mga_dma_bootstrap::secondary_bin_size.
- */
- __u32 secondary_bin_size;
-
- /**
- * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
- * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
- * zero, it means that PCI DMA should be used, even if AGP is
- * possible.
- *
- * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
- * filled in with the actual AGP mode. If AGP was not available
- * (i.e., PCI DMA was used), this value will be zero.
- */
- __u32 agp_mode;
-
- /**
- * Desired AGP GART size, measured in megabytes.
- */
- __u8 agp_size;
-} drm_mga_dma_bootstrap_t;
-
-typedef struct drm_mga_clear {
- unsigned int flags;
- unsigned int clear_color;
- unsigned int clear_depth;
- unsigned int color_mask;
- unsigned int depth_mask;
-} drm_mga_clear_t;
-
-typedef struct drm_mga_vertex {
- int idx; /* buffer to queue */
- int used; /* bytes in use */
- int discard; /* client finished with buffer? */
-} drm_mga_vertex_t;
-
-typedef struct drm_mga_indices {
- int idx; /* buffer to queue */
- unsigned int start;
- unsigned int end;
- int discard; /* client finished with buffer? */
-} drm_mga_indices_t;
-
-typedef struct drm_mga_iload {
- int idx;
- unsigned int dstorg;
- unsigned int length;
-} drm_mga_iload_t;
-
-typedef struct _drm_mga_blit {
- unsigned int planemask;
- unsigned int srcorg;
- unsigned int dstorg;
- int src_pitch, dst_pitch;
- int delta_sx, delta_sy;
- int delta_dx, delta_dy;
- int height, ydir; /* flip image vertically */
- int source_pitch, dest_pitch;
-} drm_mga_blit_t;
-
-/* 3.1: An ioctl to get parameters that aren't available to the 3d
- * client any other way.
- */
-#define MGA_PARAM_IRQ_NR 1
-
-/* 3.2: Query the actual card type. The DDX only distinguishes between
- * G200 chips and non-G200 chips, which it calls G400. It turns out that
- * there are some very sublte differences between the G4x0 chips and the G550
- * chips. Using this parameter query, a client-side driver can detect the
- * difference between a G4x0 and a G550.
- */
-#define MGA_PARAM_CARD_TYPE 2
-
-typedef struct drm_mga_getparam {
- int param;
- void __user *value;
-} drm_mga_getparam_t;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index f54b48ef6a2d..329100016e7c 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -222,10 +222,12 @@ struct drm_msm_gem_submit_cmd {
#define MSM_SUBMIT_BO_READ 0x0001
#define MSM_SUBMIT_BO_WRITE 0x0002
#define MSM_SUBMIT_BO_DUMP 0x0004
+#define MSM_SUBMIT_BO_NO_IMPLICIT 0x0008
#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | \
MSM_SUBMIT_BO_WRITE | \
- MSM_SUBMIT_BO_DUMP)
+ MSM_SUBMIT_BO_DUMP | \
+ MSM_SUBMIT_BO_NO_IMPLICIT)
struct drm_msm_gem_submit_bo {
__u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
diff --git a/include/uapi/drm/r128_drm.h b/include/uapi/drm/r128_drm.h
deleted file mode 100644
index 690e9c62f510..000000000000
--- a/include/uapi/drm/r128_drm.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
- * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
- */
-/*
- * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- * Kevin E. Martin <martin@valinux.com>
- */
-
-#ifndef __R128_DRM_H__
-#define __R128_DRM_H__
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the X server file (r128_sarea.h)
- */
-#ifndef __R128_SAREA_DEFINES__
-#define __R128_SAREA_DEFINES__
-
-/* What needs to be changed for the current vertex buffer?
- */
-#define R128_UPLOAD_CONTEXT 0x001
-#define R128_UPLOAD_SETUP 0x002
-#define R128_UPLOAD_TEX0 0x004
-#define R128_UPLOAD_TEX1 0x008
-#define R128_UPLOAD_TEX0IMAGES 0x010
-#define R128_UPLOAD_TEX1IMAGES 0x020
-#define R128_UPLOAD_CORE 0x040
-#define R128_UPLOAD_MASKS 0x080
-#define R128_UPLOAD_WINDOW 0x100
-#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
-#define R128_REQUIRE_QUIESCENCE 0x400
-#define R128_UPLOAD_ALL 0x7ff
-
-#define R128_FRONT 0x1
-#define R128_BACK 0x2
-#define R128_DEPTH 0x4
-
-/* Primitive types
- */
-#define R128_POINTS 0x1
-#define R128_LINES 0x2
-#define R128_LINE_STRIP 0x3
-#define R128_TRIANGLES 0x4
-#define R128_TRIANGLE_FAN 0x5
-#define R128_TRIANGLE_STRIP 0x6
-
-/* Vertex/indirect buffer size
- */
-#define R128_BUFFER_SIZE 16384
-
-/* Byte offsets for indirect buffer data
- */
-#define R128_INDEX_PRIM_OFFSET 20
-#define R128_HOSTDATA_BLIT_OFFSET 32
-
-/* Keep these small for testing.
- */
-#define R128_NR_SAREA_CLIPRECTS 12
-
-/* There are 2 heaps (local/AGP). Each region within a heap is a
- * minimum of 64k, and there are at most 64 of them per heap.
- */
-#define R128_LOCAL_TEX_HEAP 0
-#define R128_AGP_TEX_HEAP 1
-#define R128_NR_TEX_HEAPS 2
-#define R128_NR_TEX_REGIONS 64
-#define R128_LOG_TEX_GRANULARITY 16
-
-#define R128_NR_CONTEXT_REGS 12
-
-#define R128_MAX_TEXTURE_LEVELS 11
-#define R128_MAX_TEXTURE_UNITS 2
-
-#endif /* __R128_SAREA_DEFINES__ */
-
-typedef struct {
- /* Context state - can be written in one large chunk */
- unsigned int dst_pitch_offset_c;
- unsigned int dp_gui_master_cntl_c;
- unsigned int sc_top_left_c;
- unsigned int sc_bottom_right_c;
- unsigned int z_offset_c;
- unsigned int z_pitch_c;
- unsigned int z_sten_cntl_c;
- unsigned int tex_cntl_c;
- unsigned int misc_3d_state_cntl_reg;
- unsigned int texture_clr_cmp_clr_c;
- unsigned int texture_clr_cmp_msk_c;
- unsigned int fog_color_c;
-
- /* Texture state */
- unsigned int tex_size_pitch_c;
- unsigned int constant_color_c;
-
- /* Setup state */
- unsigned int pm4_vc_fpu_setup;
- unsigned int setup_cntl;
-
- /* Mask state */
- unsigned int dp_write_mask;
- unsigned int sten_ref_mask_c;
- unsigned int plane_3d_mask_c;
-
- /* Window state */
- unsigned int window_xy_offset;
-
- /* Core state */
- unsigned int scale_3d_cntl;
-} drm_r128_context_regs_t;
-
-/* Setup registers for each texture unit
- */
-typedef struct {
- unsigned int tex_cntl;
- unsigned int tex_combine_cntl;
- unsigned int tex_size_pitch;
- unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
- unsigned int tex_border_color;
-} drm_r128_texture_regs_t;
-
-typedef struct drm_r128_sarea {
- /* The channel for communication of state information to the kernel
- * on firing a vertex buffer.
- */
- drm_r128_context_regs_t context_state;
- drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
- unsigned int dirty;
- unsigned int vertsize;
- unsigned int vc_format;
-
- /* The current cliprects, or a subset thereof.
- */
- struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
- unsigned int nbox;
-
- /* Counters for client-side throttling of rendering clients.
- */
- unsigned int last_frame;
- unsigned int last_dispatch;
-
- struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
- unsigned int tex_age[R128_NR_TEX_HEAPS];
- int ctx_owner;
- int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
- int pfCurrentPage; /* which buffer is being displayed? */
-} drm_r128_sarea_t;
-
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (xf86drmR128.h)
- */
-
-/* Rage 128 specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
- */
-#define DRM_R128_INIT 0x00
-#define DRM_R128_CCE_START 0x01
-#define DRM_R128_CCE_STOP 0x02
-#define DRM_R128_CCE_RESET 0x03
-#define DRM_R128_CCE_IDLE 0x04
-/* 0x05 not used */
-#define DRM_R128_RESET 0x06
-#define DRM_R128_SWAP 0x07
-#define DRM_R128_CLEAR 0x08
-#define DRM_R128_VERTEX 0x09
-#define DRM_R128_INDICES 0x0a
-#define DRM_R128_BLIT 0x0b
-#define DRM_R128_DEPTH 0x0c
-#define DRM_R128_STIPPLE 0x0d
-/* 0x0e not used */
-#define DRM_R128_INDIRECT 0x0f
-#define DRM_R128_FULLSCREEN 0x10
-#define DRM_R128_CLEAR2 0x11
-#define DRM_R128_GETPARAM 0x12
-#define DRM_R128_FLIP 0x13
-
-#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
-#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
-#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
-#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
-#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
-/* 0x05 not used */
-#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
-#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
-#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
-#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
-#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
-#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
-#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
-#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
-/* 0x0e not used */
-#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
-#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
-#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
-#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
-#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
-
-typedef struct drm_r128_init {
- enum {
- R128_INIT_CCE = 0x01,
- R128_CLEANUP_CCE = 0x02
- } func;
- unsigned long sarea_priv_offset;
- int is_pci;
- int cce_mode;
- int cce_secure;
- int ring_size;
- int usec_timeout;
-
- unsigned int fb_bpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_bpp;
- unsigned int depth_offset, depth_pitch;
- unsigned int span_offset;
-
- unsigned long fb_offset;
- unsigned long mmio_offset;
- unsigned long ring_offset;
- unsigned long ring_rptr_offset;
- unsigned long buffers_offset;
- unsigned long agp_textures_offset;
-} drm_r128_init_t;
-
-typedef struct drm_r128_cce_stop {
- int flush;
- int idle;
-} drm_r128_cce_stop_t;
-
-typedef struct drm_r128_clear {
- unsigned int flags;
- unsigned int clear_color;
- unsigned int clear_depth;
- unsigned int color_mask;
- unsigned int depth_mask;
-} drm_r128_clear_t;
-
-typedef struct drm_r128_vertex {
- int prim;
- int idx; /* Index of vertex buffer */
- int count; /* Number of vertices in buffer */
- int discard; /* Client finished with buffer? */
-} drm_r128_vertex_t;
-
-typedef struct drm_r128_indices {
- int prim;
- int idx;
- int start;
- int end;
- int discard; /* Client finished with buffer? */
-} drm_r128_indices_t;
-
-typedef struct drm_r128_blit {
- int idx;
- int pitch;
- int offset;
- int format;
- unsigned short x, y;
- unsigned short width, height;
-} drm_r128_blit_t;
-
-typedef struct drm_r128_depth {
- enum {
- R128_WRITE_SPAN = 0x01,
- R128_WRITE_PIXELS = 0x02,
- R128_READ_SPAN = 0x03,
- R128_READ_PIXELS = 0x04
- } func;
- int n;
- int __user *x;
- int __user *y;
- unsigned int __user *buffer;
- unsigned char __user *mask;
-} drm_r128_depth_t;
-
-typedef struct drm_r128_stipple {
- unsigned int __user *mask;
-} drm_r128_stipple_t;
-
-typedef struct drm_r128_indirect {
- int idx;
- int start;
- int end;
- int discard;
-} drm_r128_indirect_t;
-
-typedef struct drm_r128_fullscreen {
- enum {
- R128_INIT_FULLSCREEN = 0x01,
- R128_CLEANUP_FULLSCREEN = 0x02
- } func;
-} drm_r128_fullscreen_t;
-
-/* 2.3: An ioctl to get parameters that aren't available to the 3d
- * client any other way.
- */
-#define R128_PARAM_IRQ_NR 1
-
-typedef struct drm_r128_getparam {
- int param;
- void __user *value;
-} drm_r128_getparam_t;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif
diff --git a/include/uapi/drm/savage_drm.h b/include/uapi/drm/savage_drm.h
deleted file mode 100644
index 0f6eddef74aa..000000000000
--- a/include/uapi/drm/savage_drm.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/* savage_drm.h -- Public header for the savage driver
- *
- * Copyright 2004 Felix Kuehling
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __SAVAGE_DRM_H__
-#define __SAVAGE_DRM_H__
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#ifndef __SAVAGE_SAREA_DEFINES__
-#define __SAVAGE_SAREA_DEFINES__
-
-/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
- * regions, subject to a minimum region size of (1<<16) == 64k.
- *
- * Clients may subdivide regions internally, but when sharing between
- * clients, the region size is the minimum granularity.
- */
-
-#define SAVAGE_CARD_HEAP 0
-#define SAVAGE_AGP_HEAP 1
-#define SAVAGE_NR_TEX_HEAPS 2
-#define SAVAGE_NR_TEX_REGIONS 16
-#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
-
-#endif /* __SAVAGE_SAREA_DEFINES__ */
-
-typedef struct _drm_savage_sarea {
- /* LRU lists for texture memory in agp space and on the card.
- */
- struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
- 1];
- unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
-
- /* Mechanism to validate card state.
- */
- int ctxOwner;
-} drm_savage_sarea_t, *drm_savage_sarea_ptr;
-
-/* Savage-specific ioctls
- */
-#define DRM_SAVAGE_BCI_INIT 0x00
-#define DRM_SAVAGE_BCI_CMDBUF 0x01
-#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
-#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
-
-#define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
-#define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
-#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
-#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
-
-#define SAVAGE_DMA_PCI 1
-#define SAVAGE_DMA_AGP 3
-typedef struct drm_savage_init {
- enum {
- SAVAGE_INIT_BCI = 1,
- SAVAGE_CLEANUP_BCI = 2
- } func;
- unsigned int sarea_priv_offset;
-
- /* some parameters */
- unsigned int cob_size;
- unsigned int bci_threshold_lo, bci_threshold_hi;
- unsigned int dma_type;
-
- /* frame buffer layout */
- unsigned int fb_bpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_bpp;
- unsigned int depth_offset, depth_pitch;
-
- /* local textures */
- unsigned int texture_offset;
- unsigned int texture_size;
-
- /* physical locations of non-permanent maps */
- unsigned long status_offset;
- unsigned long buffers_offset;
- unsigned long agp_textures_offset;
- unsigned long cmd_dma_offset;
-} drm_savage_init_t;
-
-typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
-typedef struct drm_savage_cmdbuf {
- /* command buffer in client's address space */
- drm_savage_cmd_header_t __user *cmd_addr;
- unsigned int size; /* size of the command buffer in 64bit units */
-
- unsigned int dma_idx; /* DMA buffer index to use */
- int discard; /* discard DMA buffer when done */
- /* vertex buffer in client's address space */
- unsigned int __user *vb_addr;
- unsigned int vb_size; /* size of client vertex buffer in bytes */
- unsigned int vb_stride; /* stride of vertices in 32bit words */
- /* boxes in client's address space */
- struct drm_clip_rect __user *box_addr;
- unsigned int nbox; /* number of clipping boxes */
-} drm_savage_cmdbuf_t;
-
-#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
-#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
-#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
-typedef struct drm_savage_event {
- unsigned int count;
- unsigned int flags;
-} drm_savage_event_emit_t, drm_savage_event_wait_t;
-
-/* Commands for the cmdbuf ioctl
- */
-#define SAVAGE_CMD_STATE 0 /* a range of state registers */
-#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
-#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
-#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
-#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices client vertex buffer */
-#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
-#define SAVAGE_CMD_SWAP 6 /* swap buffers */
-
-/* Primitive types
-*/
-#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
-#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
-#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
-#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
- * shading on s3d */
-
-/* Skip flags (vertex format)
- */
-#define SAVAGE_SKIP_Z 0x01
-#define SAVAGE_SKIP_W 0x02
-#define SAVAGE_SKIP_C0 0x04
-#define SAVAGE_SKIP_C1 0x08
-#define SAVAGE_SKIP_S0 0x10
-#define SAVAGE_SKIP_T0 0x20
-#define SAVAGE_SKIP_ST0 0x30
-#define SAVAGE_SKIP_S1 0x40
-#define SAVAGE_SKIP_T1 0x80
-#define SAVAGE_SKIP_ST1 0xc0
-#define SAVAGE_SKIP_ALL_S3D 0x3f
-#define SAVAGE_SKIP_ALL_S4 0xff
-
-/* Buffer names for clear command
- */
-#define SAVAGE_FRONT 0x1
-#define SAVAGE_BACK 0x2
-#define SAVAGE_DEPTH 0x4
-
-/* 64-bit command header
- */
-union drm_savage_cmd_header {
- struct {
- unsigned char cmd; /* command */
- unsigned char pad0;
- unsigned short pad1;
- unsigned short pad2;
- unsigned short pad3;
- } cmd; /* generic */
- struct {
- unsigned char cmd;
- unsigned char global; /* need idle engine? */
- unsigned short count; /* number of consecutive registers */
- unsigned short start; /* first register */
- unsigned short pad3;
- } state; /* SAVAGE_CMD_STATE */
- struct {
- unsigned char cmd;
- unsigned char prim; /* primitive type */
- unsigned short skip; /* vertex format (skip flags) */
- unsigned short count; /* number of vertices */
- unsigned short start; /* first vertex in DMA/vertex buffer */
- } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
- struct {
- unsigned char cmd;
- unsigned char prim;
- unsigned short skip;
- unsigned short count; /* number of indices that follow */
- unsigned short pad3;
- } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
- struct {
- unsigned char cmd;
- unsigned char pad0;
- unsigned short pad1;
- unsigned int flags;
- } clear0; /* SAVAGE_CMD_CLEAR */
- struct {
- unsigned int mask;
- unsigned int value;
- } clear1; /* SAVAGE_CMD_CLEAR data */
-};
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif
diff --git a/include/uapi/drm/sis_drm.h b/include/uapi/drm/sis_drm.h
deleted file mode 100644
index 3e3f7e989e0b..000000000000
--- a/include/uapi/drm/sis_drm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
-/*
- * Copyright 2005 Eric Anholt
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __SIS_DRM_H__
-#define __SIS_DRM_H__
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* SiS specific ioctls */
-#define NOT_USED_0_3
-#define DRM_SIS_FB_ALLOC 0x04
-#define DRM_SIS_FB_FREE 0x05
-#define NOT_USED_6_12
-#define DRM_SIS_AGP_INIT 0x13
-#define DRM_SIS_AGP_ALLOC 0x14
-#define DRM_SIS_AGP_FREE 0x15
-#define DRM_SIS_FB_INIT 0x16
-
-#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
-#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
-#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
-#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
-#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
-#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
-/*
-#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
-#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49)
-#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50)
-*/
-
-typedef struct {
- int context;
- unsigned long offset;
- unsigned long size;
- unsigned long free;
-} drm_sis_mem_t;
-
-typedef struct {
- unsigned long offset, size;
-} drm_sis_agp_t;
-
-typedef struct {
- unsigned long offset, size;
-} drm_sis_fb_t;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* __SIS_DRM_H__ */
diff --git a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h
deleted file mode 100644
index a1e125d42208..000000000000
--- a/include/uapi/drm/via_drm.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _VIA_DRM_H_
-#define _VIA_DRM_H_
-
-#include "drm.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* WARNING: These defines must be the same as what the Xserver uses.
- * if you change them, you must change the defines in the Xserver.
- */
-
-#ifndef _VIA_DEFINES_
-#define _VIA_DEFINES_
-
-
-#define VIA_NR_SAREA_CLIPRECTS 8
-#define VIA_NR_XVMC_PORTS 10
-#define VIA_NR_XVMC_LOCKS 5
-#define VIA_MAX_CACHELINE_SIZE 64
-#define XVMCLOCKPTR(saPriv,lockNo) \
- ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
- (VIA_MAX_CACHELINE_SIZE - 1)) & \
- ~(VIA_MAX_CACHELINE_SIZE - 1)) + \
- VIA_MAX_CACHELINE_SIZE*(lockNo)))
-
-/* Each region is a minimum of 64k, and there are at most 64 of them.
- */
-#define VIA_NR_TEX_REGIONS 64
-#define VIA_LOG_MIN_TEX_REGION_SIZE 16
-#endif
-
-#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
-#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
-#define VIA_UPLOAD_CTX 0x4
-#define VIA_UPLOAD_BUFFERS 0x8
-#define VIA_UPLOAD_TEX0 0x10
-#define VIA_UPLOAD_TEX1 0x20
-#define VIA_UPLOAD_CLIPRECTS 0x40
-#define VIA_UPLOAD_ALL 0xff
-
-/* VIA specific ioctls */
-#define DRM_VIA_ALLOCMEM 0x00
-#define DRM_VIA_FREEMEM 0x01
-#define DRM_VIA_AGP_INIT 0x02
-#define DRM_VIA_FB_INIT 0x03
-#define DRM_VIA_MAP_INIT 0x04
-#define DRM_VIA_DEC_FUTEX 0x05
-#define NOT_USED
-#define DRM_VIA_DMA_INIT 0x07
-#define DRM_VIA_CMDBUFFER 0x08
-#define DRM_VIA_FLUSH 0x09
-#define DRM_VIA_PCICMD 0x0a
-#define DRM_VIA_CMDBUF_SIZE 0x0b
-#define NOT_USED
-#define DRM_VIA_WAIT_IRQ 0x0d
-#define DRM_VIA_DMA_BLIT 0x0e
-#define DRM_VIA_BLIT_SYNC 0x0f
-
-#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
-#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
-#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
-#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
-#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
-#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
-#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
-#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
-#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
-#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
-#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
- drm_via_cmdbuf_size_t)
-#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
-#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
-#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
-
-/* Indices into buf.Setup where various bits of state are mirrored per
- * context and per buffer. These can be fired at the card as a unit,
- * or in a piecewise fashion as required.
- */
-
-#define VIA_TEX_SETUP_SIZE 8
-
-/* Flags for clear ioctl
- */
-#define VIA_FRONT 0x1
-#define VIA_BACK 0x2
-#define VIA_DEPTH 0x4
-#define VIA_STENCIL 0x8
-#define VIA_MEM_VIDEO 0 /* matches drm constant */
-#define VIA_MEM_AGP 1 /* matches drm constant */
-#define VIA_MEM_SYSTEM 2
-#define VIA_MEM_MIXED 3
-#define VIA_MEM_UNKNOWN 4
-
-typedef struct {
- __u32 offset;
- __u32 size;
-} drm_via_agp_t;
-
-typedef struct {
- __u32 offset;
- __u32 size;
-} drm_via_fb_t;
-
-typedef struct {
- __u32 context;
- __u32 type;
- __u32 size;
- unsigned long index;
- unsigned long offset;
-} drm_via_mem_t;
-
-typedef struct _drm_via_init {
- enum {
- VIA_INIT_MAP = 0x01,
- VIA_CLEANUP_MAP = 0x02
- } func;
-
- unsigned long sarea_priv_offset;
- unsigned long fb_offset;
- unsigned long mmio_offset;
- unsigned long agpAddr;
-} drm_via_init_t;
-
-typedef struct _drm_via_futex {
- enum {
- VIA_FUTEX_WAIT = 0x00,
- VIA_FUTEX_WAKE = 0X01
- } func;
- __u32 ms;
- __u32 lock;
- __u32 val;
-} drm_via_futex_t;
-
-typedef struct _drm_via_dma_init {
- enum {
- VIA_INIT_DMA = 0x01,
- VIA_CLEANUP_DMA = 0x02,
- VIA_DMA_INITIALIZED = 0x03
- } func;
-
- unsigned long offset;
- unsigned long size;
- unsigned long reg_pause_addr;
-} drm_via_dma_init_t;
-
-typedef struct _drm_via_cmdbuffer {
- char __user *buf;
- unsigned long size;
-} drm_via_cmdbuffer_t;
-
-/* Warning: If you change the SAREA structure you must change the Xserver
- * structure as well */
-
-typedef struct _drm_via_tex_region {
- unsigned char next, prev; /* indices to form a circular LRU */
- unsigned char inUse; /* owned by a client, or free? */
- int age; /* tracked by clients to update local LRU's */
-} drm_via_tex_region_t;
-
-typedef struct _drm_via_sarea {
- unsigned int dirty;
- unsigned int nbox;
- struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
- drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
- int texAge; /* last time texture was uploaded */
- int ctxOwner; /* last context to upload state */
- int vertexPrim;
-
- /*
- * Below is for XvMC.
- * We want the lock integers alone on, and aligned to, a cache line.
- * Therefore this somewhat strange construct.
- */
-
- char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
-
- unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
- unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
- unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
-
- /* Used by the 3d driver only at this point, for pageflipping:
- */
- unsigned int pfCurrentOffset;
-} drm_via_sarea_t;
-
-typedef struct _drm_via_cmdbuf_size {
- enum {
- VIA_CMDBUF_SPACE = 0x01,
- VIA_CMDBUF_LAG = 0x02
- } func;
- int wait;
- __u32 size;
-} drm_via_cmdbuf_size_t;
-
-typedef enum {
- VIA_IRQ_ABSOLUTE = 0x0,
- VIA_IRQ_RELATIVE = 0x1,
- VIA_IRQ_SIGNAL = 0x10000000,
- VIA_IRQ_FORCE_SEQUENCE = 0x20000000
-} via_irq_seq_type_t;
-
-#define VIA_IRQ_FLAGS_MASK 0xF0000000
-
-enum drm_via_irqs {
- drm_via_irq_hqv0 = 0,
- drm_via_irq_hqv1,
- drm_via_irq_dma0_dd,
- drm_via_irq_dma0_td,
- drm_via_irq_dma1_dd,
- drm_via_irq_dma1_td,
- drm_via_irq_num
-};
-
-struct drm_via_wait_irq_request {
- unsigned irq;
- via_irq_seq_type_t type;
- __u32 sequence;
- __u32 signal;
-};
-
-typedef union drm_via_irqwait {
- struct drm_via_wait_irq_request request;
- struct drm_wait_vblank_reply reply;
-} drm_via_irqwait_t;
-
-typedef struct drm_via_blitsync {
- __u32 sync_handle;
- unsigned engine;
-} drm_via_blitsync_t;
-
-/*
- * Below,"flags" is currently unused but will be used for possible future
- * extensions like kernel space bounce buffers for bad alignments and
- * blit engine busy-wait polling for better latency in the absence of
- * interrupts.
- */
-
-typedef struct drm_via_dmablit {
- __u32 num_lines;
- __u32 line_length;
-
- __u32 fb_addr;
- __u32 fb_stride;
-
- unsigned char *mem_addr;
- __u32 mem_stride;
-
- __u32 flags;
- int to_fb;
-
- drm_via_blitsync_t sync;
-} drm_via_dmablit_t;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* _VIA_DRM_H_ */
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index e72e4de8f452..5f636b5afcd7 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -450,7 +450,7 @@ enum binder_driver_return_protocol {
BR_FROZEN_REPLY = _IO('r', 18),
/*
- * The target of the last transaction (either a bcTRANSACTION or
+ * The target of the last sync transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) is frozen. No parameters.
*/
@@ -460,6 +460,11 @@ enum binder_driver_return_protocol {
* asynchronous transaction makes the allocated async buffer size exceed
* detection threshold. No parameters.
*/
+
+ BR_TRANSACTION_PENDING_FROZEN = _IO('r', 20),
+ /*
+ * The target of the last async transaction is frozen. No parameters.
+ */
};
enum binder_driver_command_protocol {
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index ea4692c339ce..9204e4494b25 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -26,6 +26,7 @@
* @BATADV_CODED: network coded packets
* @BATADV_ELP: echo location packets for B.A.T.M.A.N. V
* @BATADV_OGM2: originator messages for B.A.T.M.A.N. V
+ * @BATADV_MCAST: multicast packet with multiple destination addresses
*
* @BATADV_UNICAST: unicast packets carrying unicast payload traffic
* @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
@@ -42,6 +43,7 @@ enum batadv_packettype {
BATADV_CODED = 0x02,
BATADV_ELP = 0x03,
BATADV_OGM2 = 0x04,
+ BATADV_MCAST = 0x05,
/* 0x40 - 0x7f: unicast */
#define BATADV_UNICAST_MIN 0x40
BATADV_UNICAST = 0x40,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 464ca3f01fe7..62ce1f5d1b1d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1156,6 +1156,11 @@ enum bpf_link_type {
*/
#define BPF_F_XDP_HAS_FRAGS (1U << 5)
+/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded
+ * program becomes device-bound but can access XDP metadata.
+ */
+#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
+
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
@@ -2001,6 +2006,9 @@ union bpf_attr {
* sending the packet. This flag was added for GRE
* encapsulation, but might be used with other protocols
* as well in the future.
+ * **BPF_F_NO_TUNNEL_KEY**
+ * Add a flag to tunnel metadata indicating that no tunnel
+ * key should be set in the resulting tunnel header.
*
* Here is a typical usage on the transmit path:
*
@@ -2644,6 +2652,11 @@ union bpf_attr {
* Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
* L2 type as Ethernet.
*
+ * * **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**,
+ * **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**:
+ * Indicate the new IP header version after decapsulating the outer
+ * IP header. Used when the inner and outer IP versions are different.
+ *
* A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
@@ -2788,7 +2801,7 @@ union bpf_attr {
*
* long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
- * For en eBPF program attached to a perf event, retrieve the
+ * For an eBPF program attached to a perf event, retrieve the
* value of the event counter associated to *ctx* and store it in
* the structure pointed by *buf* and of size *buf_size*. Enabled
* and running times are also stored in the structure (see
@@ -3121,6 +3134,11 @@ union bpf_attr {
* **BPF_FIB_LOOKUP_OUTPUT**
* Perform lookup from an egress perspective (default is
* ingress).
+ * **BPF_FIB_LOOKUP_SKIP_NEIGH**
+ * Skip the neighbour table lookup. *params*->dmac
+ * and *params*->smac will not be set as output. A common
+ * use case is to call **bpf_redirect_neigh**\ () after
+ * doing **bpf_fib_lookup**\ ().
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
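
The new BPF_FIB_LOOKUP_SKIP_NEIGH flag pairs naturally with bpf_redirect_neigh(). The tc program below is an illustrative sketch, not part of the patch; the section name, licence string and the elided packet parsing are assumptions.

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int fwd_skip_neigh(struct __sk_buff *skb)
{
	struct bpf_fib_lookup fib = {
		.family	 = 2,			/* AF_INET */
		.ifindex = skb->ingress_ifindex,
		/* source/destination addresses would be parsed from the packet here */
	};

	/* Resolve only the route; dmac/smac are deliberately left unset. */
	if (bpf_fib_lookup(skb, &fib, sizeof(fib), BPF_FIB_LOOKUP_SKIP_NEIGH) !=
	    BPF_FIB_LKUP_RET_SUCCESS)
		return TC_ACT_OK;

	/* Let the redirect helper perform the neighbour resolution. */
	return bpf_redirect_neigh(fib.ifindex, NULL, 0, 0);
}

char LICENSE[] SEC("license") = "GPL";
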
@@ -5764,6 +5782,7 @@ enum {
BPF_F_ZERO_CSUM_TX = (1ULL << 1),
BPF_F_DONT_FRAGMENT = (1ULL << 2),
BPF_F_SEQ_NUMBER = (1ULL << 3),
+ BPF_F_NO_TUNNEL_KEY = (1ULL << 4),
};
/* BPF_FUNC_skb_get_tunnel_key flags. */
@@ -5803,6 +5822,8 @@ enum {
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = (1ULL << 7),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = (1ULL << 8),
};
enum {
@@ -6734,6 +6755,7 @@ struct bpf_raw_tracepoint_args {
enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
};
enum {
@@ -6901,6 +6923,17 @@ struct bpf_list_node {
__u64 :64;
} __attribute__((aligned(8)));
+struct bpf_rb_root {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_rb_node {
+ __u64 :64;
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h
index c71021a2a9ed..86bbacf2a315 100644
--- a/include/uapi/linux/cxl_mem.h
+++ b/include/uapi/linux/cxl_mem.h
@@ -11,14 +11,19 @@
/**
* DOC: UAPI
*
- * Not all of all commands that the driver supports are always available for use
- * by userspace. Userspace must check the results from the QUERY command in
- * order to determine the live set of commands.
+ * Not all of the commands that the driver supports are available for use by
+ * userspace at all times. Userspace can check the result of the QUERY command
+ * to determine the live set of commands. Alternatively, it can issue the
+ * command and check for failure.
*/
#define CXL_MEM_QUERY_COMMANDS _IOR(0xCE, 1, struct cxl_mem_query_commands)
#define CXL_MEM_SEND_COMMAND _IOWR(0xCE, 2, struct cxl_send_command)
+/*
+ * NOTE: New defines must be added to the end of the list to preserve
+ * compatibility because this enum is exported to user space.
+ */
#define CXL_CMDS \
___C(INVALID, "Invalid Command"), \
___C(IDENTIFY, "Identify Command"), \
@@ -68,6 +73,19 @@ static const struct {
* struct cxl_command_info - Command information returned from a query.
* @id: ID number for the command.
* @flags: Flags that specify command behavior.
+ *
+ * CXL_MEM_COMMAND_FLAG_ENABLED
+ *
+ * The given command id is supported by the driver and is supported by
+ * a related opcode on the device.
+ *
+ * CXL_MEM_COMMAND_FLAG_EXCLUSIVE
+ *
+ * Requests with the given command id will terminate with EBUSY as the
+ * kernel actively owns management of the given resource. For example,
+ * the label-storage-area can not be written while the kernel is
+ * actively managing that space.
+ *
* @size_in: Expected input size, or ~0 if variable length.
* @size_out: Expected output size, or ~0 if variable length.
*
@@ -77,7 +95,7 @@ static const struct {
* bytes of output.
*
* - @id = 10
- * - @flags = 0
+ * - @flags = CXL_MEM_COMMAND_FLAG_ENABLED
* - @size_in = ~0
* - @size_out = 0
*
@@ -87,7 +105,9 @@ struct cxl_command_info {
__u32 id;
__u32 flags;
-#define CXL_MEM_COMMAND_FLAG_MASK GENMASK(0, 0)
+#define CXL_MEM_COMMAND_FLAG_MASK GENMASK(1, 0)
+#define CXL_MEM_COMMAND_FLAG_ENABLED BIT(0)
+#define CXL_MEM_COMMAND_FLAG_EXCLUSIVE BIT(1)
__u32 size_in;
__u32 size_out;
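
A hedged user-space sketch of the QUERY flow described in the DOC text above: size the command array, query it, and test the new CXL_MEM_COMMAND_FLAG_ENABLED bit. It is not part of the patch; the /dev/cxl/mem0 path and the BIT() compatibility define are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef BIT
#define BIT(n) (1U << (n))	/* the uapi header uses BIT()/GENMASK() */
#endif
#include <linux/cxl_mem.h>

int main(void)
{
	int fd = open("/dev/cxl/mem0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	/* First pass: n_commands == 0 asks for the total number of commands. */
	struct cxl_mem_query_commands count = { .n_commands = 0 };
	if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, &count) < 0) {
		close(fd);
		return 1;
	}

	/* Second pass: fetch the per-command info records. */
	struct cxl_mem_query_commands *query =
		calloc(1, sizeof(*query) + count.n_commands * sizeof(query->commands[0]));
	if (!query) {
		close(fd);
		return 1;
	}
	query->n_commands = count.n_commands;
	if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, query) == 0) {
		for (__u32 i = 0; i < query->n_commands; i++)
			if (query->commands[i].flags & CXL_MEM_COMMAND_FLAG_ENABLED)
				printf("command %u is live\n", query->commands[i].id);
	}

	free(query);
	close(fd);
	return 0;
}
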
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 99047223ab26..7e15214aa5dd 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -411,6 +411,7 @@ enum dcbnl_attrs {
* @DCB_ATTR_IEEE_PEER_PFC: peer PFC configuration - get only
* @DCB_ATTR_IEEE_PEER_APP: peer APP tlv - get only
* @DCB_ATTR_DCB_APP_TRUST_TABLE: selector trust table
+ * @DCB_ATTR_DCB_REWR_TABLE: rewrite configuration
*/
enum ieee_attrs {
DCB_ATTR_IEEE_UNSPEC,
@@ -425,6 +426,7 @@ enum ieee_attrs {
DCB_ATTR_IEEE_QCN_STATS,
DCB_ATTR_DCB_BUFFER,
DCB_ATTR_DCB_APP_TRUST_TABLE,
+ DCB_ATTR_DCB_REWR_TABLE,
__DCB_ATTR_IEEE_MAX
};
#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 68de6f4c4eee..ac3da855fb19 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -445,6 +445,8 @@ typedef struct elf64_shdr {
#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
#define NT_LOONGARCH_LASX 0xa03 /* LoongArch Loongson Advanced SIMD Extension registers */
#define NT_LOONGARCH_LBT 0xa04 /* LoongArch Loongson Binary Translation registers */
+#define NT_LOONGARCH_HW_BREAK 0xa05 /* LoongArch hardware breakpoint registers */
+#define NT_LOONGARCH_HW_WATCH 0xa06 /* LoongArch hardware watchpoint registers */
/* Note types with note name "GNU" */
#define NT_GNU_PROPERTY_TYPE_0 5
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 58e587ba0450..f7fba0dc87e5 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -712,6 +712,24 @@ enum ethtool_stringset {
};
/**
+ * enum ethtool_mac_stats_src - source of ethtool MAC statistics
+ * @ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ * if device supports a MAC merge layer, this retrieves the aggregate
+ * statistics of the eMAC and pMAC. Otherwise, it retrieves just the
+ * statistics of the single (express) MAC.
+ * @ETHTOOL_MAC_STATS_SRC_EMAC:
+ * if device supports a MM layer, this retrieves the eMAC statistics.
+ * Otherwise, it retrieves the statistics of the single (express) MAC.
+ * @ETHTOOL_MAC_STATS_SRC_PMAC:
+ * if device supports a MM layer, this retrieves the pMAC statistics.
+ */
+enum ethtool_mac_stats_src {
+ ETHTOOL_MAC_STATS_SRC_AGGREGATE,
+ ETHTOOL_MAC_STATS_SRC_EMAC,
+ ETHTOOL_MAC_STATS_SRC_PMAC,
+};
+
+/**
* enum ethtool_module_power_mode_policy - plug-in module power mode policy
* @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode.
* @ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO: Module is transitioned by the host
@@ -780,6 +798,31 @@ enum ethtool_podl_pse_pw_d_status {
};
/**
+ * enum ethtool_mm_verify_status - status of MAC Merge Verify function
+ * @ETHTOOL_MM_VERIFY_STATUS_UNKNOWN:
+ * verification status is unknown
+ * @ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ * the 802.3 Verify State diagram is in the state INIT_VERIFICATION
+ * @ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ * the Verify State diagram is in the state VERIFICATION_IDLE,
+ * SEND_VERIFY or WAIT_FOR_RESPONSE
+ * @ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ * indicates that the Verify State diagram is in the state VERIFIED
+ * @ETHTOOL_MM_VERIFY_STATUS_FAILED:
+ * the Verify State diagram is in the state VERIFY_FAIL
+ * @ETHTOOL_MM_VERIFY_STATUS_DISABLED:
+ * verification of preemption operation is disabled
+ */
+enum ethtool_mm_verify_status {
+ ETHTOOL_MM_VERIFY_STATUS_UNKNOWN,
+ ETHTOOL_MM_VERIFY_STATUS_INITIAL,
+ ETHTOOL_MM_VERIFY_STATUS_VERIFYING,
+ ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED,
+ ETHTOOL_MM_VERIFY_STATUS_FAILED,
+ ETHTOOL_MM_VERIFY_STATUS_DISABLED,
+};
+
+/**
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
@@ -1183,7 +1226,7 @@ struct ethtool_rxnfc {
__u32 rule_cnt;
__u32 rss_context;
};
- __u32 rule_locs[0];
+ __u32 rule_locs[];
};
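
The move from a zero-length array to a C99 flexible array member does not change how userspace sizes the buffer; a short sketch of the usual allocation pattern for an ETHTOOL_GRXCLSRLALL request (the helper name is illustrative):

#include <stdlib.h>
#include <linux/ethtool.h>

/* Allocate room for the header plus 'count' trailing rule locations. */
static struct ethtool_rxnfc *alloc_rxnfc(__u32 count)
{
        struct ethtool_rxnfc *nfc;

        nfc = calloc(1, sizeof(*nfc) + count * sizeof(nfc->rule_locs[0]));
        if (!nfc)
                return NULL;
        nfc->cmd = ETHTOOL_GRXCLSRLALL;
        nfc->rule_cnt = count;          /* capacity of rule_locs[] */
        return nfc;
}
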
@@ -1741,6 +1784,9 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96,
ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97,
ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98,
+ ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99,
+ ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100,
+ ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 5799a9db034e..d39ce21381c5 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -52,6 +52,11 @@ enum {
ETHTOOL_MSG_PSE_GET,
ETHTOOL_MSG_PSE_SET,
ETHTOOL_MSG_RSS_GET,
+ ETHTOOL_MSG_PLCA_GET_CFG,
+ ETHTOOL_MSG_PLCA_SET_CFG,
+ ETHTOOL_MSG_PLCA_GET_STATUS,
+ ETHTOOL_MSG_MM_GET,
+ ETHTOOL_MSG_MM_SET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -99,6 +104,11 @@ enum {
ETHTOOL_MSG_MODULE_NTF,
ETHTOOL_MSG_PSE_GET_REPLY,
ETHTOOL_MSG_RSS_GET_REPLY,
+ ETHTOOL_MSG_PLCA_GET_CFG_REPLY,
+ ETHTOOL_MSG_PLCA_GET_STATUS_REPLY,
+ ETHTOOL_MSG_PLCA_NTF,
+ ETHTOOL_MSG_MM_GET_REPLY,
+ ETHTOOL_MSG_MM_NTF,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -346,6 +356,7 @@ enum {
ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */
ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */
ETHTOOL_A_RINGS_TX_PUSH, /* u8 */
+ ETHTOOL_A_RINGS_RX_PUSH, /* u8 */
/* add new constants above here */
__ETHTOOL_A_RINGS_CNT,
@@ -400,6 +411,9 @@ enum {
ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, /* u32 */
ETHTOOL_A_COALESCE_USE_CQE_MODE_TX, /* u8 */
ETHTOOL_A_COALESCE_USE_CQE_MODE_RX, /* u8 */
+ ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES, /* u32 */
+ ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES, /* u32 */
+ ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, /* u32 */
/* add new constants above here */
__ETHTOOL_A_COALESCE_CNT,
@@ -415,6 +429,7 @@ enum {
ETHTOOL_A_PAUSE_RX, /* u8 */
ETHTOOL_A_PAUSE_TX, /* u8 */
ETHTOOL_A_PAUSE_STATS, /* nest - _PAUSE_STAT_* */
+ ETHTOOL_A_PAUSE_STATS_SRC, /* u32 */
/* add new constants above here */
__ETHTOOL_A_PAUSE_CNT,
@@ -731,6 +746,8 @@ enum {
ETHTOOL_A_STATS_GRP, /* nest - _A_STATS_GRP_* */
+ ETHTOOL_A_STATS_SRC, /* u32 */
+
/* add new constants above here */
__ETHTOOL_A_STATS_CNT,
ETHTOOL_A_STATS_MAX = (__ETHTOOL_A_STATS_CNT - 1)
@@ -894,6 +911,68 @@ enum {
ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1),
};
+/* PLCA */
+
+enum {
+ ETHTOOL_A_PLCA_UNSPEC,
+ ETHTOOL_A_PLCA_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PLCA_VERSION, /* u16 */
+ ETHTOOL_A_PLCA_ENABLED, /* u8 */
+ ETHTOOL_A_PLCA_STATUS, /* u8 */
+ ETHTOOL_A_PLCA_NODE_CNT, /* u32 */
+ ETHTOOL_A_PLCA_NODE_ID, /* u32 */
+ ETHTOOL_A_PLCA_TO_TMR, /* u32 */
+ ETHTOOL_A_PLCA_BURST_CNT, /* u32 */
+ ETHTOOL_A_PLCA_BURST_TMR, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PLCA_CNT,
+ ETHTOOL_A_PLCA_MAX = (__ETHTOOL_A_PLCA_CNT - 1)
+};
+
+/* MAC Merge (802.3) */
+
+enum {
+ ETHTOOL_A_MM_STAT_UNSPEC,
+ ETHTOOL_A_MM_STAT_PAD,
+
+ /* aMACMergeFrameAssErrorCount */
+ ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS, /* u64 */
+ /* aMACMergeFrameSmdErrorCount */
+ ETHTOOL_A_MM_STAT_SMD_ERRORS, /* u64 */
+ /* aMACMergeFrameAssOkCount */
+ ETHTOOL_A_MM_STAT_REASSEMBLY_OK, /* u64 */
+ /* aMACMergeFragCountRx */
+ ETHTOOL_A_MM_STAT_RX_FRAG_COUNT, /* u64 */
+ /* aMACMergeFragCountTx */
+ ETHTOOL_A_MM_STAT_TX_FRAG_COUNT, /* u64 */
+ /* aMACMergeHoldCount */
+ ETHTOOL_A_MM_STAT_HOLD_COUNT, /* u64 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MM_STAT_CNT,
+ ETHTOOL_A_MM_STAT_MAX = (__ETHTOOL_A_MM_STAT_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_MM_UNSPEC,
+ ETHTOOL_A_MM_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_MM_PMAC_ENABLED, /* u8 */
+ ETHTOOL_A_MM_TX_ENABLED, /* u8 */
+ ETHTOOL_A_MM_TX_ACTIVE, /* u8 */
+ ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, /* u32 */
+ ETHTOOL_A_MM_RX_MIN_FRAG_SIZE, /* u32 */
+ ETHTOOL_A_MM_VERIFY_ENABLED, /* u8 */
+ ETHTOOL_A_MM_VERIFY_STATUS, /* u8 */
+ ETHTOOL_A_MM_VERIFY_TIME, /* u32 */
+ ETHTOOL_A_MM_MAX_VERIFY_TIME, /* u32 */
+ ETHTOOL_A_MM_STATS, /* nest - _A_MM_STAT_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MM_CNT,
+ ETHTOOL_A_MM_MAX = (__ETHTOOL_A_MM_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 2f86b2ad6d7e..e8c07da58c9f 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -43,6 +43,7 @@
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
#define F_SEAL_WRITE 0x0008 /* prevent writes */
#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
+#define F_SEAL_EXEC 0x0020 /* prevent chmod modifying exec bits */
/* (1U << 31) is reserved for signed error codes */
/*
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
index 87c2c9f08803..19ebbef41a63 100644
--- a/include/uapi/linux/fou.h
+++ b/include/uapi/linux/fou.h
@@ -1,32 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* fou.h - FOU Interface */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/fou.yaml */
+/* YNL-GEN uapi header */
#ifndef _UAPI_LINUX_FOU_H
#define _UAPI_LINUX_FOU_H
-/* NETLINK_GENERIC related info
- */
#define FOU_GENL_NAME "fou"
-#define FOU_GENL_VERSION 0x1
+#define FOU_GENL_VERSION 1
enum {
- FOU_ATTR_UNSPEC,
- FOU_ATTR_PORT, /* u16 */
- FOU_ATTR_AF, /* u8 */
- FOU_ATTR_IPPROTO, /* u8 */
- FOU_ATTR_TYPE, /* u8 */
- FOU_ATTR_REMCSUM_NOPARTIAL, /* flag */
- FOU_ATTR_LOCAL_V4, /* u32 */
- FOU_ATTR_LOCAL_V6, /* in6_addr */
- FOU_ATTR_PEER_V4, /* u32 */
- FOU_ATTR_PEER_V6, /* in6_addr */
- FOU_ATTR_PEER_PORT, /* u16 */
- FOU_ATTR_IFINDEX, /* s32 */
-
- __FOU_ATTR_MAX,
+ FOU_ENCAP_UNSPEC,
+ FOU_ENCAP_DIRECT,
+ FOU_ENCAP_GUE,
};
-#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1)
+enum {
+ FOU_ATTR_UNSPEC,
+ FOU_ATTR_PORT,
+ FOU_ATTR_AF,
+ FOU_ATTR_IPPROTO,
+ FOU_ATTR_TYPE,
+ FOU_ATTR_REMCSUM_NOPARTIAL,
+ FOU_ATTR_LOCAL_V4,
+ FOU_ATTR_LOCAL_V6,
+ FOU_ATTR_PEER_V4,
+ FOU_ATTR_PEER_V6,
+ FOU_ATTR_PEER_PORT,
+ FOU_ATTR_IFINDEX,
+
+ __FOU_ATTR_MAX
+};
+#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1)
enum {
FOU_CMD_UNSPEC,
@@ -34,15 +39,8 @@ enum {
FOU_CMD_DEL,
FOU_CMD_GET,
- __FOU_CMD_MAX,
+ __FOU_CMD_MAX
};
-
-enum {
- FOU_ENCAP_UNSPEC,
- FOU_ENCAP_DIRECT,
- FOU_ENCAP_GUE,
-};
-
-#define FOU_CMD_MAX (__FOU_CMD_MAX - 1)
+#define FOU_CMD_MAX (__FOU_CMD_MAX - 1)
#endif /* _UAPI_LINUX_FOU_H */
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index e3c54109bae9..1b9d0dfae72d 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -201,6 +201,11 @@
* 7.38
* - add FUSE_EXPIRE_ONLY flag to fuse_notify_inval_entry
* - add FOPEN_PARALLEL_DIRECT_WRITES
+ * - add total_extlen to fuse_in_header
+ * - add FUSE_MAX_NR_SECCTX
+ * - add extension header
+ * - add FUSE_EXT_GROUPS
+ * - add FUSE_CREATE_SUPP_GROUP
*/
#ifndef _LINUX_FUSE_H
@@ -362,6 +367,8 @@ struct fuse_file_lock {
* FUSE_SECURITY_CTX: add security context to create, mkdir, symlink, and
* mknod
* FUSE_HAS_INODE_DAX: use per inode DAX
+ * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
+ * symlink and mknod (single group that matches parent)
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -398,6 +405,7 @@ struct fuse_file_lock {
/* bits 32..63 get shifted down 32 bits into the flags2 field */
#define FUSE_SECURITY_CTX (1ULL << 32)
#define FUSE_HAS_INODE_DAX (1ULL << 33)
+#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
/**
* CUSE INIT request/reply flags
@@ -503,6 +511,17 @@ struct fuse_file_lock {
*/
#define FUSE_EXPIRE_ONLY (1 << 0)
+/**
+ * extension type
+ * FUSE_MAX_NR_SECCTX: maximum value of &fuse_secctx_header.nr_secctx
+ * FUSE_EXT_GROUPS: &fuse_supp_groups extension
+ */
+enum fuse_ext_type {
+ /* Types 0..31 are reserved for fuse_secctx_header */
+ FUSE_MAX_NR_SECCTX = 31,
+ FUSE_EXT_GROUPS = 32,
+};
+
enum fuse_opcode {
FUSE_LOOKUP = 1,
FUSE_FORGET = 2, /* no reply */
@@ -886,7 +905,8 @@ struct fuse_in_header {
uint32_t uid;
uint32_t gid;
uint32_t pid;
- uint32_t padding;
+ uint16_t total_extlen; /* length of extensions in 8byte units */
+ uint16_t padding;
};
struct fuse_out_header {
@@ -1047,4 +1067,27 @@ struct fuse_secctx_header {
uint32_t nr_secctx;
};
+/**
+ * struct fuse_ext_header - extension header
+ * @size: total size of this extension including this header
+ * @type: type of extension
+ *
+ * This is made compatible with fuse_secctx_header by using type values >
+ * FUSE_MAX_NR_SECCTX
+ */
+struct fuse_ext_header {
+ uint32_t size;
+ uint32_t type;
+};
+
+/**
+ * struct fuse_supp_groups - Supplementary group extension
+ * @nr_groups: number of supplementary groups
+ * @groups: flexible array of group IDs
+ */
+struct fuse_supp_groups {
+ uint32_t nr_groups;
+ uint32_t groups[];
+};
+
#endif /* _LINUX_FUSE_H */
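
Because total_extlen counts 8-byte units, extensions have to be padded accordingly; a hedged sketch of sizing a FUSE_EXT_GROUPS extension (the helper name is illustrative, and the wire layout beyond the two structures above is assumed to follow the usual header-then-payload pattern):

#include <stdint.h>
#include <linux/fuse.h>

/* Bytes needed for a FUSE_EXT_GROUPS extension with nr_groups entries,
 * rounded up to the 8-byte granularity of fuse_in_header.total_extlen. */
static uint32_t supp_groups_ext_size(uint32_t nr_groups)
{
        uint32_t bytes = sizeof(struct fuse_ext_header) +
                         sizeof(struct fuse_supp_groups) +
                         nr_groups * sizeof(uint32_t);

        return (bytes + 7) & ~7u;
}

/* total_extlen for the request then grows by supp_groups_ext_size(n) / 8. */
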
diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index cb8693b39cb7..a703780aa095 100644
--- a/include/uapi/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -19,8 +19,7 @@ struct gsm_config
unsigned int mtu;
unsigned int k;
unsigned int i;
- unsigned int unused[8]; /* Padding for expansion without
- breaking stuff */
+ unsigned int unused[8]; /* Can not be used */
};
#define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config)
@@ -29,9 +28,9 @@ struct gsm_config
struct gsm_netconfig {
unsigned int adaption; /* Adaption to use in network mode */
unsigned short protocol;/* Protocol to use - only ETH_P_IP supported */
- unsigned short unused2;
+ unsigned short unused2; /* Can not be used */
char if_name[IFNAMSIZ]; /* interface name format string */
- __u8 unused[28]; /* For future use */
+ __u8 unused[28]; /* Can not be used */
};
#define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig)
@@ -40,4 +39,14 @@ struct gsm_netconfig {
/* get the base tty number for a configured gsmmux tty */
#define GSMIOC_GETFIRST _IOR('G', 4, __u32)
+struct gsm_config_ext {
+ __u32 keep_alive; /* Control channel keep-alive in 1/100th of a
+ * second (0 to disable)
+ */
+ __u32 reserved[7]; /* For future use, must be initialized to zero */
+};
+
+#define GSMIOC_GETCONF_EXT _IOR('G', 5, struct gsm_config_ext)
+#define GSMIOC_SETCONF_EXT _IOW('G', 6, struct gsm_config_ext)
+
#endif
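
A sketch of the expected read-modify-write usage of the new extended-config ioctls on an n_gsm line-discipline fd; the 5-second value and the error handling are illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/gsmmux.h>

/* Enable a 5 second control-channel keep-alive. */
static int gsm_set_keepalive(int fd)
{
        struct gsm_config_ext ce;

        memset(&ce, 0, sizeof(ce));             /* keep reserved[] zeroed */
        if (ioctl(fd, GSMIOC_GETCONF_EXT, &ce) < 0)
                return -1;
        ce.keep_alive = 500;                    /* 1/100th of a second units */
        return ioctl(fd, GSMIOC_SETCONF_EXT, &ce);
}
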
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index d9de241d90f9..d60c456710b3 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -523,6 +523,8 @@ enum {
BRIDGE_VLANDB_ENTRY_TUNNEL_INFO,
BRIDGE_VLANDB_ENTRY_STATS,
BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
+ BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS,
+ BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS,
__BRIDGE_VLANDB_ENTRY_MAX,
};
#define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1)
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 1021a7e47a86..57ceb788250f 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -374,6 +374,9 @@ enum {
IFLA_DEVLINK_PORT,
+ IFLA_GSO_IPV4_MAX_SIZE,
+ IFLA_GRO_IPV4_MAX_SIZE,
+
__IFLA_MAX
};
@@ -564,6 +567,8 @@ enum {
IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
IFLA_BRPORT_LOCKED,
IFLA_BRPORT_MAB,
+ IFLA_BRPORT_MCAST_N_GROUPS,
+ IFLA_BRPORT_MCAST_MAX_GROUPS,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index a8516b3594a4..78c981d6a9d4 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -115,6 +115,7 @@ struct tpacket_auxdata {
#define TP_STATUS_BLK_TMO (1 << 5)
#define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */
#define TP_STATUS_CSUM_VALID (1 << 7)
+#define TP_STATUS_GSO_TCP (1 << 8)
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 07a4cb149305..4b7f2df66b99 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -162,6 +162,7 @@ struct in_addr {
#define MCAST_MSFILTER 48
#define IP_MULTICAST_ALL 49
#define IP_UNICAST_IF 50
+#define IP_LOCAL_PORT_RANGE 51
#define MCAST_EXCLUDE 0
#define MCAST_INCLUDE 1
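
A hedged sketch of the new socket option; the value encoding assumed here (lower bound in the low 16 bits, upper bound in the high 16 bits, zero to clear the restriction) is not spelled out by this header and should be treated as an assumption:

#include <stdint.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51
#endif

/* Restrict local port selection for one socket to [lo, hi] (assumed encoding). */
static int set_local_port_range(int sock, uint16_t lo, uint16_t hi)
{
        uint32_t range = ((uint32_t)hi << 16) | lo;

        return setsockopt(sock, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
                          &range, sizeof(range));
}
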
diff --git a/include/uapi/linux/ioam6.h b/include/uapi/linux/ioam6.h
index ac4de376f0ce..8f72b24fefb3 100644
--- a/include/uapi/linux/ioam6.h
+++ b/include/uapi/linux/ioam6.h
@@ -127,7 +127,7 @@ struct ioam6_trace_hdr {
#endif
#define IOAM6_TRACE_DATA_SIZE_MAX 244
- __u8 data[0];
+ __u8 data[];
} __attribute__((packed));
#endif /* _UAPI_LINUX_IOAM6_H */
diff --git a/include/uapi/linux/kd.h b/include/uapi/linux/kd.h
index ee929ece4112..6b384065c013 100644
--- a/include/uapi/linux/kd.h
+++ b/include/uapi/linux/kd.h
@@ -161,19 +161,25 @@ struct console_font_op {
unsigned int flags; /* KD_FONT_FLAG_* */
unsigned int width, height; /* font size */
unsigned int charcount;
- unsigned char __user *data; /* font data with height fixed to 32 */
+ unsigned char __user *data; /* font data with vpitch fixed to 32 for
+ * KD_FONT_OP_SET/GET
+ */
};
struct console_font {
unsigned int width, height; /* font size */
unsigned int charcount;
- unsigned char *data; /* font data with height fixed to 32 */
+ unsigned char *data; /* font data with vpitch fixed to 32 for
+ * KD_FONT_OP_SET/GET
+ */
};
#define KD_FONT_OP_SET 0 /* Set font */
#define KD_FONT_OP_GET 1 /* Get font */
#define KD_FONT_OP_SET_DEFAULT 2 /* Set font to default, data points to name / NULL */
#define KD_FONT_OP_COPY 3 /* Obsolete, do not use */
+#define KD_FONT_OP_SET_TALL 4 /* Set font with vpitch = height */
+#define KD_FONT_OP_GET_TALL 5 /* Get font with vpitch = height */
#define KD_FONT_FLAG_DONT_RECALC 1 /* Don't recalculate hw charcell size [compat] */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 55155e262646..d77aef872a0a 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -583,6 +583,8 @@ struct kvm_s390_mem_op {
struct {
__u8 ar; /* the access register number */
__u8 key; /* access key, ignored if flag unset */
+ __u8 pad1[6]; /* ignored */
+ __u64 old_addr; /* ignored if cmpxchg flag unset */
};
__u32 sida_offset; /* offset into the sida */
__u8 reserved[32]; /* ignored */
@@ -595,11 +597,17 @@ struct kvm_s390_mem_op {
#define KVM_S390_MEMOP_SIDA_WRITE 3
#define KVM_S390_MEMOP_ABSOLUTE_READ 4
#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5
+#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6
+
/* flags for kvm_s390_mem_op->flags */
#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2)
+/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */
+#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0)
+#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1)
+
/* for KVM_INTERRUPT */
struct kvm_interrupt {
/* in */
@@ -1175,6 +1183,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223
#define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
#define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
+#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index 75b7257a51e1..256b463e47a6 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -79,6 +79,8 @@
#define MDIO_AN_T1_LP_L 517 /* BASE-T1 AN LP Base Page ability register [15:0] */
#define MDIO_AN_T1_LP_M 518 /* BASE-T1 AN LP Base Page ability register [31:16] */
#define MDIO_AN_T1_LP_H 519 /* BASE-T1 AN LP Base Page ability register [47:32] */
+#define MDIO_AN_10BT1_AN_CTRL 526 /* 10BASE-T1 AN control register */
+#define MDIO_AN_10BT1_AN_STAT 527 /* 10BASE-T1 AN status register */
#define MDIO_PMA_PMD_BT1_CTRL 2100 /* BASE-T1 PMA/PMD control register */
/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
@@ -340,6 +342,12 @@
#define MDIO_AN_T1_LP_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level LP Transmit Request */
#define MDIO_AN_T1_LP_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level LP Transmit Ability */
+/* 10BASE-T1 AN control register */
+#define MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L 0x4000 /* 10BASE-T1L EEE ability advertisement */
+
+/* 10BASE-T1 AN status register */
+#define MDIO_AN_10BT1_AN_STAT_LPA_EEE_T1L 0x4000 /* 10BASE-T1L LP EEE ability advertisement */
+
/* BASE-T1 PMA/PMD control register */
#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index ca9a24c874da..a03c543cb072 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x1022 */
+/* RGB - next is 0x1025 */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -46,8 +46,11 @@
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_BGR666_1X18 0x1023
#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
+#define MEDIA_BUS_FMT_BGR666_1X24_CPADHI 0x1024
+#define MEDIA_BUS_FMT_RGB565_1X24_CPADHI 0x1022
#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010
#define MEDIA_BUS_FMT_BGR888_1X24 0x1013
#define MEDIA_BUS_FMT_BGR888_3X8 0x101b
diff --git a/include/uapi/linux/memfd.h b/include/uapi/linux/memfd.h
index 7a8a26751c23..273a4e15dfcf 100644
--- a/include/uapi/linux/memfd.h
+++ b/include/uapi/linux/memfd.h
@@ -8,6 +8,10 @@
#define MFD_CLOEXEC 0x0001U
#define MFD_ALLOW_SEALING 0x0002U
#define MFD_HUGETLB 0x0004U
+/* not executable and sealed to prevent changing to executable. */
+#define MFD_NOEXEC_SEAL 0x0008U
+/* executable */
+#define MFD_EXEC 0x0010U
/*
* Huge page size encoding when MFD_HUGETLB is specified, and a huge page
diff --git a/include/uapi/linux/meye.h b/include/uapi/linux/meye.h
deleted file mode 100644
index de9e3a954f3d..000000000000
--- a/include/uapi/linux/meye.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * Motion Eye video4linux driver for Sony Vaio PictureBook
- *
- * Copyright (C) 2001-2003 Stelian Pop <stelian@popies.net>
- *
- * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
- *
- * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
- *
- * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
- *
- * Some parts borrowed from various video4linux drivers, especially
- * bttv-driver.c and zoran.c, see original files for credits.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _MEYE_H_
-#define _MEYE_H_
-
-/****************************************************************************/
-/* Private API for handling mjpeg capture / playback. */
-/****************************************************************************/
-
-struct meye_params {
- unsigned char subsample;
- unsigned char quality;
- unsigned char sharpness;
- unsigned char agc;
- unsigned char picture;
- unsigned char framerate;
-};
-
-/* query the extended parameters */
-#define MEYEIOC_G_PARAMS _IOR ('v', BASE_VIDIOC_PRIVATE+0, struct meye_params)
-/* set the extended parameters */
-#define MEYEIOC_S_PARAMS _IOW ('v', BASE_VIDIOC_PRIVATE+1, struct meye_params)
-/* queue a buffer for mjpeg capture */
-#define MEYEIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOC_PRIVATE+2, int)
-/* sync a previously queued mjpeg buffer */
-#define MEYEIOC_SYNC _IOWR('v', BASE_VIDIOC_PRIVATE+3, int)
-/* get a still uncompressed snapshot */
-#define MEYEIOC_STILLCAPT _IO ('v', BASE_VIDIOC_PRIVATE+4)
-/* get a jpeg compressed snapshot */
-#define MEYEIOC_STILLJCAPT _IOR ('v', BASE_VIDIOC_PRIVATE+5, int)
-
-/* V4L2 private controls */
-#define V4L2_CID_MEYE_AGC (V4L2_CID_USER_MEYE_BASE + 0)
-#define V4L2_CID_MEYE_PICTURE (V4L2_CID_USER_MEYE_BASE + 1)
-#define V4L2_CID_MEYE_FRAMERATE (V4L2_CID_USER_MEYE_BASE + 2)
-
-#endif
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
new file mode 100644
index 000000000000..588391447bfb
--- /dev/null
+++ b/include/uapi/linux/netdev.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NETDEV_H
+#define _UAPI_LINUX_NETDEV_H
+
+#define NETDEV_FAMILY_NAME "netdev"
+#define NETDEV_FAMILY_VERSION 1
+
+/**
+ * enum netdev_xdp_act
+ * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
+ * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
+ * (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
+ * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
+ * @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements
+ * ndo_xdp_xmit callback.
+ * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP
+ * in zero copy mode.
+ * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw
+ * offloading.
+ * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear
+ * XDP buffer support in the driver napi callback.
+ * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements
+ * non-linear XDP buffer support in ndo_xdp_xmit callback.
+ */
+enum netdev_xdp_act {
+ NETDEV_XDP_ACT_BASIC = 1,
+ NETDEV_XDP_ACT_REDIRECT = 2,
+ NETDEV_XDP_ACT_NDO_XMIT = 4,
+ NETDEV_XDP_ACT_XSK_ZEROCOPY = 8,
+ NETDEV_XDP_ACT_HW_OFFLOAD = 16,
+ NETDEV_XDP_ACT_RX_SG = 32,
+ NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+};
+
+enum {
+ NETDEV_A_DEV_IFINDEX = 1,
+ NETDEV_A_DEV_PAD,
+ NETDEV_A_DEV_XDP_FEATURES,
+
+ __NETDEV_A_DEV_MAX,
+ NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
+};
+
+enum {
+ NETDEV_CMD_DEV_GET = 1,
+ NETDEV_CMD_DEV_ADD_NTF,
+ NETDEV_CMD_DEV_DEL_NTF,
+ NETDEV_CMD_DEV_CHANGE_NTF,
+
+ __NETDEV_CMD_MAX,
+ NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
+};
+
+#define NETDEV_MCGRP_MGMT "mgmt"
+
+#endif /* _UAPI_LINUX_NETDEV_H */
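
The netdev_xdp_act values are bit flags carried in NETDEV_A_DEV_XDP_FEATURES; a small sketch of interpreting such a mask once it has been pulled out of the genetlink reply (fetching the attribute itself is not shown):

#include <stdbool.h>
#include <stdint.h>
#include <linux/netdev.h>

/* True if the device advertises AF_XDP zero-copy plus non-linear
 * ndo_xdp_xmit support, per the flag descriptions above. */
static bool xdp_zc_mb_capable(uint64_t xdp_features)
{
        const uint64_t need = NETDEV_XDP_ACT_XSK_ZEROCOPY |
                              NETDEV_XDP_ACT_NDO_XMIT |
                              NETDEV_XDP_ACT_NDO_XMIT_SG;

        return (xdp_features & need) == need;
}
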
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index cfa844da1ce6..ff677f3a6cad 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -98,6 +98,13 @@ enum nft_verdicts {
* @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes)
* @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes)
* @NFT_MSG_GETRULE_RESET: get rules and reset stateful expressions (enum nft_obj_attributes)
+ * @NFT_MSG_DESTROYTABLE: destroy a table (enum nft_table_attributes)
+ * @NFT_MSG_DESTROYCHAIN: destroy a chain (enum nft_chain_attributes)
+ * @NFT_MSG_DESTROYRULE: destroy a rule (enum nft_rule_attributes)
+ * @NFT_MSG_DESTROYSET: destroy a set (enum nft_set_attributes)
+ * @NFT_MSG_DESTROYSETELEM: destroy a set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_DESTROYOBJ: destroy a stateful object (enum nft_object_attributes)
+ * @NFT_MSG_DESTROYFLOWTABLE: destroy flow table (enum nft_flowtable_attributes)
*/
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
@@ -126,6 +133,13 @@ enum nf_tables_msg_types {
NFT_MSG_GETFLOWTABLE,
NFT_MSG_DELFLOWTABLE,
NFT_MSG_GETRULE_RESET,
+ NFT_MSG_DESTROYTABLE,
+ NFT_MSG_DESTROYCHAIN,
+ NFT_MSG_DESTROYRULE,
+ NFT_MSG_DESTROYSET,
+ NFT_MSG_DESTROYSETELEM,
+ NFT_MSG_DESTROYOBJ,
+ NFT_MSG_DESTROYFLOWTABLE,
NFT_MSG_MAX,
};
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index c14a91bbca7c..f14621a954e1 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -424,7 +424,8 @@
* interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all stations, on the interface identified
- * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and
+ * by %NL80211_ATTR_IFINDEX. For an MLD station, the MLD address is
+ * used in %NL80211_ATTR_MAC. %NL80211_ATTR_MGMT_SUBTYPE and
* %NL80211_ATTR_REASON_CODE can optionally be used to specify which type
* of disconnection indication should be sent to the station
* (Deauthentication or Disassociation frame and reason code for that
@@ -1166,6 +1167,23 @@
* %NL80211_ATTR_STATUS_CODE attribute in %NL80211_CMD_EXTERNAL_AUTH
* command interface.
*
+ * The host driver sends the MLD address of the AP with %NL80211_ATTR_MLD_ADDR
+ * in the %NL80211_CMD_EXTERNAL_AUTH event to indicate that user space should
+ * enable MLO during the authentication offload in STA mode while connecting
+ * to MLD APs. The host driver should check the %NL80211_ATTR_MLO_SUPPORT flag
+ * capability in %NL80211_CMD_CONNECT to know whether user space supports
+ * enabling MLO during the authentication offload.
+ * User space should enable MLO during the authentication only when it
+ * receives the AP MLD address in authentication offload request. User
+ * space shouldn't enable MLO when the authentication offload request
+ * doesn't indicate the AP MLD address even if the AP is MLO capable.
+ * User space should use %NL80211_ATTR_MLD_ADDR as the peer's MLD address and
+ * the interface address identified by %NL80211_ATTR_IFINDEX as its own MLD
+ * address. User space and the host driver use MLD addresses in the RA, TA and
+ * BSSID fields of the frames between them, and the host driver translates the
+ * MLD addresses to/from link addresses based on the link chosen for the
+ * authentication.
+ *
* Host driver reports this status on an authentication failure to the
* user space through the connect result as the user space would have
* initiated the connection through the connect request.
@@ -2751,6 +2769,12 @@ enum nl80211_commands {
* the incoming frame RX timestamp.
* @NL80211_ATTR_TD_BITMAP: Transition Disable bitmap, for subsequent
* (re)associations.
+ *
+ * @NL80211_ATTR_PUNCT_BITMAP: (u32) Preamble puncturing bitmap, lowest
+ * bit corresponds to the lowest 20 MHz channel. Each bit set to 1
+ * indicates that the sub-channel is punctured. Higher 16 bits are
+ * reserved.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3280,6 +3304,8 @@ enum nl80211_attrs {
NL80211_ATTR_RX_HW_TIMESTAMP,
NL80211_ATTR_TD_BITMAP,
+ NL80211_ATTR_PUNCT_BITMAP,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -5869,6 +5895,7 @@ enum plink_actions {
#define NL80211_KEK_LEN 16
#define NL80211_KCK_EXT_LEN 24
#define NL80211_KEK_EXT_LEN 32
+#define NL80211_KCK_EXT_LEN_32 32
#define NL80211_REPLAY_CTR_LEN 8
/**
@@ -6294,6 +6321,11 @@ enum nl80211_feature_flags {
* might apply, e.g. no scans in progress, no offchannel operations
* in progress, and no active connections.
*
+ * @NL80211_EXT_FEATURE_PUNCT: Driver supports preamble puncturing in AP mode.
+ *
+ * @NL80211_EXT_FEATURE_SECURE_NAN: Device supports NAN Pairing which enables
+ * authentication, data encryption and message integrity.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6362,6 +6394,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD,
NL80211_EXT_FEATURE_RADAR_BACKGROUND,
NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE,
+ NL80211_EXT_FEATURE_PUNCT,
+ NL80211_EXT_FEATURE_SECURE_NAN,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 85ab1278811e..dc2000e0fe3a 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -693,6 +693,7 @@
#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
#define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
#define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
+#define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
#define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
#define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index a5e06dcbba13..1312a137f7fb 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -281,6 +281,12 @@ struct prctl_mm_map {
# define PR_SME_VL_LEN_MASK 0xffff
# define PR_SME_VL_INHERIT (1 << 17) /* inherit across exec */
+/* Memory deny write / execute */
+#define PR_SET_MDWE 65
+# define PR_MDWE_REFUSE_EXEC_GAIN 1
+
+#define PR_GET_MDWE 66
+
#define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
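
A minimal sketch of opting a process into memory-deny-write-execute with the new prctl pair; the fallback defines only cover userspace built against older headers:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MDWE
#define PR_SET_MDWE 65
#define PR_MDWE_REFUSE_EXEC_GAIN 1
#define PR_GET_MDWE 66
#endif

int main(void)
{
        if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0L, 0L, 0L))
                perror("PR_SET_MDWE");

        /* PR_GET_MDWE returns the currently set mask (or a negative error). */
        printf("MDWE mask: %d\n", prctl(PR_GET_MDWE, 0L, 0L, 0L, 0L));
        return 0;
}
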
diff --git a/include/uapi/linux/rpl.h b/include/uapi/linux/rpl.h
index 708adddf9f13..7c8970e5b84b 100644
--- a/include/uapi/linux/rpl.h
+++ b/include/uapi/linux/rpl.h
@@ -37,8 +37,8 @@ struct ipv6_rpl_sr_hdr {
#endif
union {
- struct in6_addr addr[0];
- __u8 data[0];
+ __DECLARE_FLEX_ARRAY(struct in6_addr, addr);
+ __DECLARE_FLEX_ARRAY(__u8, data);
} segments;
} __attribute__((packed));
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index eb2747d58a81..25a0af57dd5e 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -635,6 +635,7 @@ enum {
TCA_INGRESS_BLOCK,
TCA_EGRESS_BLOCK,
TCA_DUMP_FLAGS,
+ TCA_EXT_WARN_MSG,
__TCA_MAX
};
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 3ba34d8378bd..281fa286555c 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -207,6 +207,9 @@
/* Atheros AR933X SoC */
#define PORT_AR933X 99
+/* MCHP 16550A UART with 256 byte FIFOs */
+#define PORT_MCHP16550A 100
+
/* ARC (Synopsys) on-chip UART */
#define PORT_ARC 101
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index bab3b39266cc..08b3527e1b93 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -44,6 +44,11 @@
#define UART_IIR_RX_TIMEOUT 0x0c /* OMAP RX Timeout interrupt */
#define UART_IIR_XOFF 0x10 /* OMAP XOFF/Special Character */
#define UART_IIR_CTS_RTS_DSR 0x20 /* OMAP CTS/RTS/DSR Change */
+#define UART_IIR_64BYTE_FIFO 0x20 /* 16750 64 bytes FIFO */
+#define UART_IIR_FIFO_ENABLED 0xc0 /* FIFOs enabled / port type identification */
+#define UART_IIR_FIFO_ENABLED_8250 0x00 /* 8250: no FIFO */
+#define UART_IIR_FIFO_ENABLED_16550 0x80 /* 16550: (broken/unusable) FIFO */
+#define UART_IIR_FIFO_ENABLED_16550A 0xc0 /* 16550A: FIFO enabled */
#define UART_FCR 2 /* Out: FIFO Control Register */
#define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 6600cb0164c2..26f33a4c253d 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -95,6 +95,8 @@ enum
ICMP_MIB_OUTADDRMASKS, /* OutAddrMasks */
ICMP_MIB_OUTADDRMASKREPS, /* OutAddrMaskReps */
ICMP_MIB_CSUMERRORS, /* InCsumErrors */
+ ICMP_MIB_RATELIMITGLOBAL, /* OutRateLimitGlobal */
+ ICMP_MIB_RATELIMITHOST, /* OutRateLimitHost */
__ICMP_MIB_MAX
};
@@ -112,6 +114,7 @@ enum
ICMP6_MIB_OUTMSGS, /* OutMsgs */
ICMP6_MIB_OUTERRORS, /* OutErrors */
ICMP6_MIB_CSUMERRORS, /* InCsumErrors */
+ ICMP6_MIB_RATELIMITHOST, /* OutRateLimitHost */
__ICMP6_MIB_MAX
};
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 31fcfa084e63..b17e3a21b15f 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -948,6 +948,22 @@ struct usb_ss_container_id_descriptor {
#define USB_DT_USB_SS_CONTN_ID_SIZE 20
/*
+ * Platform Device Capability descriptor: Defines platform specific device
+ * capabilities
+ */
+#define USB_PLAT_DEV_CAP_TYPE 5
+struct usb_plat_dev_cap_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType;
+ __u8 bReserved;
+ __u8 UUID[16];
+ __u8 CapabilityData[];
+} __attribute__((packed));
+
+#define USB_DT_USB_PLAT_DEV_CAP_SIZE(capability_data_size) (20 + capability_data_size)
+
+/*
* SuperSpeed Plus USB Capability descriptor: Defines the set of
* SuperSpeed Plus USB specific device level capabilities
*/
diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
index 6e8e572c2980..2ff0e8a3a683 100644
--- a/include/uapi/linux/usb/video.h
+++ b/include/uapi/linux/usb/video.h
@@ -179,6 +179,36 @@
#define UVC_CONTROL_CAP_AUTOUPDATE (1 << 3)
#define UVC_CONTROL_CAP_ASYNCHRONOUS (1 << 4)
+/* 3.9.2.6 Color Matching Descriptor Values */
+enum uvc_color_primaries_values {
+ UVC_COLOR_PRIMARIES_UNSPECIFIED,
+ UVC_COLOR_PRIMARIES_BT_709_SRGB,
+ UVC_COLOR_PRIMARIES_BT_470_2_M,
+ UVC_COLOR_PRIMARIES_BT_470_2_B_G,
+ UVC_COLOR_PRIMARIES_SMPTE_170M,
+ UVC_COLOR_PRIMARIES_SMPTE_240M,
+};
+
+enum uvc_transfer_characteristics_values {
+ UVC_TRANSFER_CHARACTERISTICS_UNSPECIFIED,
+ UVC_TRANSFER_CHARACTERISTICS_BT_709,
+ UVC_TRANSFER_CHARACTERISTICS_BT_470_2_M,
+ UVC_TRANSFER_CHARACTERISTICS_BT_470_2_B_G,
+ UVC_TRANSFER_CHARACTERISTICS_SMPTE_170M,
+ UVC_TRANSFER_CHARACTERISTICS_SMPTE_240M,
+ UVC_TRANSFER_CHARACTERISTICS_LINEAR,
+ UVC_TRANSFER_CHARACTERISTICS_SRGB,
+};
+
+enum uvc_matrix_coefficients {
+ UVC_MATRIX_COEFFICIENTS_UNSPECIFIED,
+ UVC_MATRIX_COEFFICIENTS_BT_709,
+ UVC_MATRIX_COEFFICIENTS_FCC,
+ UVC_MATRIX_COEFFICIENTS_BT_470_2_B_G,
+ UVC_MATRIX_COEFFICIENTS_SMPTE_170M,
+ UVC_MATRIX_COEFFICIENTS_SMPTE_240M,
+};
+
/* ------------------------------------------------------------------------
* UVC structures
*/
diff --git a/include/uapi/linux/uuid.h b/include/uapi/linux/uuid.h
index c0f4bd9b040e..96ac684a4b2f 100644
--- a/include/uapi/linux/uuid.h
+++ b/include/uapi/linux/uuid.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* DO NOT USE in new code! This is solely for MEI due to legacy reasons */
/*
- * UUID/GUID definition
+ * MEI UUID definition
*
* Copyright (C) 2010, Intel Corp.
* Huang Ying <ying.huang@intel.com>
@@ -14,19 +14,15 @@
typedef struct {
__u8 b[16];
-} guid_t;
+} uuid_le;
-#define GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
-((guid_t) \
+#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+((uuid_le) \
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
(b) & 0xff, ((b) >> 8) & 0xff, \
(c) & 0xff, ((c) >> 8) & 0xff, \
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
-/* backwards compatibility, don't use in new code */
-typedef guid_t uuid_le;
-#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
- GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
#define NULL_UUID_LE \
UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00)
diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
index 8288137387c0..f86185456dc5 100644
--- a/include/uapi/linux/uvcvideo.h
+++ b/include/uapi/linux/uvcvideo.h
@@ -36,9 +36,11 @@
UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | \
UVC_CTRL_FLAG_GET_DEF)
+#define UVC_MENU_NAME_LEN 32
+
struct uvc_menu_info {
__u32 value;
- __u8 name[32];
+ __u8 name[UVC_MENU_NAME_LEN];
};
struct uvc_xu_control_mapping {
@@ -86,7 +88,7 @@ struct uvc_xu_control_query {
* struct. The first two fields are added by the driver, they can be used for
* clock synchronisation. The rest is an exact copy of a UVC payload header.
* Only complete objects with complete buffers are included. Therefore it's
- * always sizeof(meta->ts) + sizeof(meta->sof) + meta->length bytes large.
+ * always sizeof(meta->ns) + sizeof(meta->sof) + meta->length bytes large.
*/
struct uvc_meta_buf {
__u64 ns;
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index b73a8ba7df6c..5e80daa4ffe0 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -115,9 +115,13 @@ enum v4l2_colorfx {
/* USER-class private control IDs */
-/* The base for the meye driver controls. See linux/meye.h for the list
- * of controls. We reserve 16 controls for this driver. */
+#ifndef __KERNEL__
+/*
+ * The base for the meye driver controls. This driver was removed, but
+ * we keep this define in case any software still uses it.
+ */
#define V4L2_CID_USER_MEYE_BASE (V4L2_CID_USER_BASE + 0x1000)
+#endif
/* The base for the bttv driver controls.
* We reserve 32 controls for this driver. */
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index ecce4c79f5c5..654d659de835 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_V4L2_SUBDEV_H
#define __LINUX_V4L2_SUBDEV_H
+#include <linux/const.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/v4l2-common.h>
@@ -31,13 +32,15 @@ enum v4l2_subdev_format_whence {
* @which: format type (from enum v4l2_subdev_format_whence)
* @pad: pad number, as reported by the media API
* @format: media bus format (format code and frame size)
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_format {
__u32 which;
__u32 pad;
struct v4l2_mbus_framefmt format;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
@@ -45,13 +48,15 @@ struct v4l2_subdev_format {
* @which: format type (from enum v4l2_subdev_format_whence)
* @pad: pad number, as reported by the media API
* @rect: pad crop rectangle boundaries
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_crop {
__u32 which;
__u32 pad;
struct v4l2_rect rect;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
#define V4L2_SUBDEV_MBUS_CODE_CSC_COLORSPACE 0x00000001
@@ -67,6 +72,7 @@ struct v4l2_subdev_crop {
* @code: format code (MEDIA_BUS_FMT_ definitions)
* @which: format type (from enum v4l2_subdev_format_whence)
* @flags: flags set by the driver, (V4L2_SUBDEV_MBUS_CODE_*)
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_mbus_code_enum {
@@ -75,7 +81,8 @@ struct v4l2_subdev_mbus_code_enum {
__u32 code;
__u32 which;
__u32 flags;
- __u32 reserved[7];
+ __u32 stream;
+ __u32 reserved[6];
};
/**
@@ -88,6 +95,7 @@ struct v4l2_subdev_mbus_code_enum {
* @min_height: minimum frame height, in pixels
* @max_height: maximum frame height, in pixels
* @which: format type (from enum v4l2_subdev_format_whence)
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_size_enum {
@@ -99,19 +107,22 @@ struct v4l2_subdev_frame_size_enum {
__u32 min_height;
__u32 max_height;
__u32 which;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
* struct v4l2_subdev_frame_interval - Pad-level frame rate
* @pad: pad number, as reported by the media API
* @interval: frame interval in seconds
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval {
__u32 pad;
struct v4l2_fract interval;
- __u32 reserved[9];
+ __u32 stream;
+ __u32 reserved[8];
};
/**
@@ -123,6 +134,7 @@ struct v4l2_subdev_frame_interval {
* @height: frame height in pixels
* @interval: frame interval in seconds
* @which: format type (from enum v4l2_subdev_format_whence)
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval_enum {
@@ -133,7 +145,8 @@ struct v4l2_subdev_frame_interval_enum {
__u32 height;
struct v4l2_fract interval;
__u32 which;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
@@ -145,6 +158,7 @@ struct v4l2_subdev_frame_interval_enum {
* defined in v4l2-common.h; V4L2_SEL_TGT_* .
* @flags: constraint flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*.
* @r: coordinates of the selection window
+ * @stream: stream number, defined in subdev routing
* @reserved: for future use, set to zero for now
*
* Hardware may use multiple helper windows to process a video stream.
@@ -157,7 +171,8 @@ struct v4l2_subdev_selection {
__u32 target;
__u32 flags;
struct v4l2_rect r;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
@@ -175,6 +190,49 @@ struct v4l2_subdev_capability {
/* The v4l2 sub-device video device node is registered in read-only mode. */
#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001
+/* The v4l2 sub-device supports routing and multiplexed streams. */
+#define V4L2_SUBDEV_CAP_STREAMS 0x00000002
+
+/*
+ * Is the route active? An active route will start when streaming is enabled
+ * on a video node.
+ */
+#define V4L2_SUBDEV_ROUTE_FL_ACTIVE (1U << 0)
+
+/**
+ * struct v4l2_subdev_route - A route inside a subdev
+ *
+ * @sink_pad: the sink pad index
+ * @sink_stream: the sink stream identifier
+ * @source_pad: the source pad index
+ * @source_stream: the source stream identifier
+ * @flags: route flags V4L2_SUBDEV_ROUTE_FL_*
+ * @reserved: drivers and applications must zero this array
+ */
+struct v4l2_subdev_route {
+ __u32 sink_pad;
+ __u32 sink_stream;
+ __u32 source_pad;
+ __u32 source_stream;
+ __u32 flags;
+ __u32 reserved[5];
+};
+
+/**
+ * struct v4l2_subdev_routing - Subdev routing information
+ *
+ * @which: configuration type (from enum v4l2_subdev_format_whence)
+ * @num_routes: the total number of routes in the routes array
+ * @routes: pointer to the routes array
+ * @reserved: drivers and applications must zero this array
+ */
+struct v4l2_subdev_routing {
+ __u32 which;
+ __u32 num_routes;
+ __u64 routes;
+ __u32 reserved[6];
+};
+
/* Backwards compatibility define --- to be removed */
#define v4l2_subdev_edid v4l2_edid
@@ -190,6 +248,8 @@ struct v4l2_subdev_capability {
#define VIDIOC_SUBDEV_S_CROP _IOWR('V', 60, struct v4l2_subdev_crop)
#define VIDIOC_SUBDEV_G_SELECTION _IOWR('V', 61, struct v4l2_subdev_selection)
#define VIDIOC_SUBDEV_S_SELECTION _IOWR('V', 62, struct v4l2_subdev_selection)
+#define VIDIOC_SUBDEV_G_ROUTING _IOWR('V', 38, struct v4l2_subdev_routing)
+#define VIDIOC_SUBDEV_S_ROUTING _IOWR('V', 39, struct v4l2_subdev_routing)
/* The following ioctls are identical to the ioctls in videodev2.h */
#define VIDIOC_SUBDEV_G_STD _IOR('V', 23, v4l2_std_id)
#define VIDIOC_SUBDEV_S_STD _IOW('V', 24, v4l2_std_id)
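
A hedged sketch of reading the active routing table from a streams-capable sub-device; the fixed-size route array and the convention that the kernel updates num_routes are assumptions based on usual V4L2 practice rather than this excerpt:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

#define MAX_ROUTES 32

static int get_active_routes(int subdev_fd)
{
        struct v4l2_subdev_route routes[MAX_ROUTES];
        struct v4l2_subdev_routing routing;

        memset(routes, 0, sizeof(routes));
        memset(&routing, 0, sizeof(routing));
        routing.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        routing.num_routes = MAX_ROUTES;
        routing.routes = (uintptr_t)routes;     /* userspace pointer carried as __u64 */

        return ioctl(subdev_fd, VIDIOC_SUBDEV_G_ROUTING, &routing);
}
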
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 23105eb036fa..0552e8dcf0cb 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -49,7 +49,11 @@
/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
#define VFIO_UNMAP_ALL 9
-/* Supports the vaddr flag for DMA map and unmap */
+/*
+ * Supports the vaddr flag for DMA map and unmap. Not supported for mediated
+ * devices, so this capability is subject to change as groups are added or
+ * removed.
+ */
#define VFIO_UPDATE_VADDR 10
/*
@@ -1343,8 +1347,7 @@ struct vfio_iommu_type1_info_dma_avail {
* Map process virtual addresses to IO virtual addresses using the
* provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
*
- * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and
- * unblock translation of host virtual addresses in the iova range. The vaddr
+ * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr
* must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR. To
* maintain memory consistency within the user application, the updated vaddr
* must address the same memory object as originally mapped. Failure to do so
@@ -1395,9 +1398,9 @@ struct vfio_bitmap {
* must be 0. This cannot be combined with the get-dirty-bitmap flag.
*
* If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
- * virtual addresses in the iova range. Tasks that attempt to translate an
- * iova's vaddr will block. DMA to already-mapped pages continues. This
- * cannot be combined with the get-dirty-bitmap flag.
+ * virtual addresses in the iova range. DMA to already-mapped pages continues.
+ * Groups may not be added to the container while any addresses are invalid.
+ * This cannot be combined with the get-dirty-bitmap flag.
*/
struct vfio_iommu_type1_dma_unmap {
__u32 argsz;
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index f9f115a7c75b..92e1b700b51c 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -180,4 +180,12 @@
*/
#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D)
+/* Resume a device so it can resume processing virtqueue requests
+ *
+ * After the return of this ioctl the device will have restored all the
+ * necessary states and it is fully operational to continue processing the
+ * virtqueue descriptors.
+ */
+#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
+
#endif
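
A short sketch of pairing the new resume ioctl with the existing suspend one, gated on the VHOST_BACKEND_F_RESUME backend feature; the save/restore work in the middle is left as a placeholder:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vdpa_suspend_resume(int vdpa_fd)
{
        uint64_t features = 0;

        if (ioctl(vdpa_fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
                return -1;
        if (!(features & (1ULL << VHOST_BACKEND_F_RESUME)))
                return -1;      /* device cannot be resumed once suspended */

        if (ioctl(vdpa_fd, VHOST_VDPA_SUSPEND) < 0)
                return -1;
        /* ... state save/restore work would go here ... */
        return ioctl(vdpa_fd, VHOST_VDPA_RESUME);
}
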
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index 53601ce2c20a..c5690a8992d8 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -163,5 +163,7 @@ struct vhost_vdpa_iova_range {
#define VHOST_BACKEND_F_IOTLB_ASID 0x3
/* Device can be suspended */
#define VHOST_BACKEND_F_SUSPEND 0x4
+/* Device can be resumed */
+#define VHOST_BACKEND_F_RESUME 0x5
#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 1befd181a4cc..17a9b975177a 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -576,6 +576,9 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_RGBX32 v4l2_fourcc('X', 'B', '2', '4') /* 32 RGBX-8-8-8-8 */
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */
#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
+#define V4L2_PIX_FMT_RGBX1010102 v4l2_fourcc('R', 'X', '3', '0') /* 32 RGBX-10-10-10-2 */
+#define V4L2_PIX_FMT_RGBA1010102 v4l2_fourcc('R', 'A', '3', '0') /* 32 RGBA-10-10-10-2 */
+#define V4L2_PIX_FMT_ARGB2101010 v4l2_fourcc('A', 'R', '3', '0') /* 32 ARGB-2-10-10-10 */
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
@@ -618,6 +621,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YUVX32 v4l2_fourcc('Y', 'U', 'V', 'X') /* 32 YUVX-8-8-8-8 */
#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
+/*
+ * YCbCr packed format. For each Y2xx format, xx bits of valid data occupy the MSBs
+ * of the 16 bit components, and 16-xx bits of zero padding occupy the LSBs.
+ */
+#define V4L2_PIX_FMT_Y210 v4l2_fourcc('Y', '2', '1', '0') /* 32 YUYV 4:2:2 */
+#define V4L2_PIX_FMT_Y212 v4l2_fourcc('Y', '2', '1', '2') /* 32 YUYV 4:2:2 */
+#define V4L2_PIX_FMT_Y216 v4l2_fourcc('Y', '2', '1', '6') /* 32 YUYV 4:2:2 */
+
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */
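
Given the packing described above (valid bits in the MSBs, zero padding in the LSBs), pulling a sample out of a Y210 component is a single shift; a tiny sketch:

#include <stdint.h>

/* Y210 stores 10 significant bits in the top of each 16-bit component. */
static inline uint16_t y210_sample(uint16_t component)
{
        return component >> 6;  /* drop the 16 - 10 bits of zero padding */
}
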
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 58e70b24b504..5af2a0300bb9 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -41,6 +41,7 @@
#define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */
#define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */
#define VIRTIO_BLK_F_SECURE_ERASE 16 /* Secure Erase is supported */
+#define VIRTIO_BLK_F_ZONED 17 /* Zoned block device */
/* Legacy feature bits */
#ifndef VIRTIO_BLK_NO_LEGACY
@@ -137,6 +138,16 @@ struct virtio_blk_config {
/* Secure erase commands must be aligned to this number of sectors. */
__virtio32 secure_erase_sector_alignment;
+ /* Zoned block device characteristics (if VIRTIO_BLK_F_ZONED) */
+ struct virtio_blk_zoned_characteristics {
+ __le32 zone_sectors;
+ __le32 max_open_zones;
+ __le32 max_active_zones;
+ __le32 max_append_sectors;
+ __le32 write_granularity;
+ __u8 model;
+ __u8 unused2[3];
+ } zoned;
} __attribute__((packed));
/*
@@ -174,6 +185,27 @@ struct virtio_blk_config {
/* Secure erase command */
#define VIRTIO_BLK_T_SECURE_ERASE 14
+/* Zone append command */
+#define VIRTIO_BLK_T_ZONE_APPEND 15
+
+/* Report zones command */
+#define VIRTIO_BLK_T_ZONE_REPORT 16
+
+/* Open zone command */
+#define VIRTIO_BLK_T_ZONE_OPEN 18
+
+/* Close zone command */
+#define VIRTIO_BLK_T_ZONE_CLOSE 20
+
+/* Finish zone command */
+#define VIRTIO_BLK_T_ZONE_FINISH 22
+
+/* Reset zone command */
+#define VIRTIO_BLK_T_ZONE_RESET 24
+
+/* Reset All zones command */
+#define VIRTIO_BLK_T_ZONE_RESET_ALL 26
+
#ifndef VIRTIO_BLK_NO_LEGACY
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
@@ -193,6 +225,72 @@ struct virtio_blk_outhdr {
__virtio64 sector;
};
+/*
+ * Supported zoned device models.
+ */
+
+/* Regular block device */
+#define VIRTIO_BLK_Z_NONE 0
+/* Host-managed zoned device */
+#define VIRTIO_BLK_Z_HM 1
+/* Host-aware zoned device */
+#define VIRTIO_BLK_Z_HA 2
+
+/*
+ * Zone descriptor. A part of VIRTIO_BLK_T_ZONE_REPORT command reply.
+ */
+struct virtio_blk_zone_descriptor {
+ /* Zone capacity */
+ __le64 z_cap;
+ /* The starting sector of the zone */
+ __le64 z_start;
+ /* Zone write pointer position in sectors */
+ __le64 z_wp;
+ /* Zone type */
+ __u8 z_type;
+ /* Zone state */
+ __u8 z_state;
+ __u8 reserved[38];
+};
+
+struct virtio_blk_zone_report {
+ __le64 nr_zones;
+ __u8 reserved[56];
+ struct virtio_blk_zone_descriptor zones[];
+};
+
+/*
+ * Supported zone types.
+ */
+
+/* Conventional zone */
+#define VIRTIO_BLK_ZT_CONV 1
+/* Sequential Write Required zone */
+#define VIRTIO_BLK_ZT_SWR 2
+/* Sequential Write Preferred zone */
+#define VIRTIO_BLK_ZT_SWP 3
+
+/*
+ * Zone states that are available for zones of all types.
+ */
+
+/* Not a write pointer (conventional zones only) */
+#define VIRTIO_BLK_ZS_NOT_WP 0
+/* Empty */
+#define VIRTIO_BLK_ZS_EMPTY 1
+/* Implicitly Open */
+#define VIRTIO_BLK_ZS_IOPEN 2
+/* Explicitly Open */
+#define VIRTIO_BLK_ZS_EOPEN 3
+/* Closed */
+#define VIRTIO_BLK_ZS_CLOSED 4
+/* Read-Only */
+#define VIRTIO_BLK_ZS_RDONLY 13
+/* Full */
+#define VIRTIO_BLK_ZS_FULL 14
+/* Offline */
+#define VIRTIO_BLK_ZS_OFFLINE 15
+
/* Unmap this range (only valid for write zeroes command) */
#define VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP 0x00000001
@@ -219,4 +317,11 @@ struct virtio_scsi_inhdr {
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
#define VIRTIO_BLK_S_UNSUPP 2
+
+/* Error codes that are specific to zoned block devices */
+#define VIRTIO_BLK_S_ZONE_INVALID_CMD 3
+#define VIRTIO_BLK_S_ZONE_UNALIGNED_WP 4
+#define VIRTIO_BLK_S_ZONE_OPEN_RESOURCE 5
+#define VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE 6
+
#endif /* _LINUX_VIRTIO_BLK_H */
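
Buffer sizing for a VIRTIO_BLK_T_ZONE_REPORT reply follows directly from the two structures above; a sketch (the queue plumbing that issues the request is not shown):

#include <stddef.h>
#include <linux/virtio_blk.h>

/* Bytes a report reply needs to describe up to max_zones zone descriptors. */
static size_t zone_report_bytes(size_t max_zones)
{
        return sizeof(struct virtio_blk_zone_report) +
               max_zones * sizeof(struct virtio_blk_zone_descriptor);
}
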
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index 745790ce3c26..2e68a8b0c92c 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -87,10 +87,14 @@ struct hns_roce_ib_create_qp_resp {
enum {
HNS_ROCE_EXSGE_FLAGS = 1 << 0,
+ HNS_ROCE_RQ_INLINE_FLAGS = 1 << 1,
+ HNS_ROCE_CQE_INLINE_FLAGS = 1 << 2,
};
enum {
HNS_ROCE_RSP_EXSGE_FLAGS = 1 << 0,
+ HNS_ROCE_RSP_RQ_INLINE_FLAGS = 1 << 1,
+ HNS_ROCE_RSP_CQE_INLINE_FLAGS = 1 << 2,
};
struct hns_roce_ib_alloc_ucontext_resp {
diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h
index 7f5930801f72..5e46cf1054af 100644
--- a/include/uapi/scsi/scsi_bsg_fc.h
+++ b/include/uapi/scsi/scsi_bsg_fc.h
@@ -216,7 +216,7 @@ struct fc_bsg_host_vendor {
*/
struct fc_bsg_host_vendor_reply {
/* start of vendor response area */
- __u32 vendor_rsp[0];
+ __DECLARE_FLEX_ARRAY(__u32, vendor_rsp);
};
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index d55f2176dfd4..2801b65299aa 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -14,10 +14,27 @@
*/
#define UFS_CDB_SIZE 16
-#define UPIU_TRANSACTION_UIC_CMD 0x1F
/* uic commands are 4DW long, per UFSHCI V2.1 paragraph 5.6.1 */
#define UIC_CMD_SIZE (sizeof(__u32) * 4)
+enum ufs_bsg_msg_code {
+ UPIU_TRANSACTION_UIC_CMD = 0x1F,
+ UPIU_TRANSACTION_ARPMB_CMD,
+};
+
+/* UFS RPMB Request Message Types */
+enum ufs_rpmb_op_type {
+ UFS_RPMB_WRITE_KEY = 0x01,
+ UFS_RPMB_READ_CNT = 0x02,
+ UFS_RPMB_WRITE = 0x03,
+ UFS_RPMB_READ = 0x04,
+ UFS_RPMB_READ_RESP = 0x05,
+ UFS_RPMB_SEC_CONF_WRITE = 0x06,
+ UFS_RPMB_SEC_CONF_READ = 0x07,
+ UFS_RPMB_PURGE_ENABLE = 0x08,
+ UFS_RPMB_PURGE_STATUS_READ = 0x09,
+};
+
/**
* struct utp_upiu_header - UPIU header structure
* @dword_0: UPIU header DW-0
@@ -79,6 +96,23 @@ struct utp_upiu_req {
};
};
+struct ufs_arpmb_meta {
+ __be16 req_resp_type;
+ __u8 nonce[16];
+ __be32 write_counter;
+ __be16 addr_lun;
+ __be16 block_count;
+ __be16 result;
+} __attribute__((__packed__));
+
+struct ufs_ehs {
+ __u8 length;
+ __u8 ehs_type;
+ __be16 ehssub_type;
+ struct ufs_arpmb_meta meta;
+ __u8 mac_key[32];
+} __attribute__((__packed__));
+
/* request (CDB) structure of the sg_io_v4 */
struct ufs_bsg_request {
__u32 msgcode;
@@ -95,11 +129,21 @@ struct ufs_bsg_reply {
* msg and status fields. The per-msgcode reply structure
* will contain valid data.
*/
- __u32 result;
+ int result;
/* If there was reply_payload, how much was received? */
__u32 reply_payload_rcv_len;
struct utp_upiu_req upiu_rsp;
};
+
+struct ufs_rpmb_request {
+ struct ufs_bsg_request bsg_request;
+ struct ufs_ehs ehs_req;
+};
+
+struct ufs_rpmb_reply {
+ struct ufs_bsg_reply bsg_reply;
+ struct ufs_ehs ehs_rsp;
+};
#endif /* UFS_BSG_H */
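
The struct ufs_arpmb_meta fields are big-endian, so a bsg client has to convert explicitly; a hedged sketch of filling the meta block for a write-counter read; the surrounding EHS length/type values and the bsg submission itself are outside this excerpt and left out:

#include <stdint.h>
#include <string.h>
#include <endian.h>
#include <scsi/scsi_bsg_ufs.h>

/* Prepare the ARPMB meta block for a read-counter request. */
static void arpmb_fill_read_counter(struct ufs_arpmb_meta *meta,
                                    const uint8_t nonce[16])
{
        memset(meta, 0, sizeof(*meta));
        meta->req_resp_type = htobe16(UFS_RPMB_READ_CNT);
        memcpy(meta->nonce, nonce, sizeof(meta->nonce));
        /* write_counter, addr_lun, block_count and result stay zero here */
}
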
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
index 3532ac7046d7..1e86872c151f 100644
--- a/include/uapi/sound/firewire.h
+++ b/include/uapi/sound/firewire.h
@@ -14,6 +14,7 @@
#define SNDRV_FIREWIRE_EVENT_MOTU_NOTIFICATION 0x64776479
#define SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL 0x7473636d
#define SNDRV_FIREWIRE_EVENT_MOTU_REGISTER_DSP_CHANGE 0x4d545244
+#define SNDRV_FIREWIRE_EVENT_FF400_MESSAGE 0x4f6c6761
struct snd_firewire_event_common {
unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */
@@ -72,6 +73,30 @@ struct snd_firewire_event_motu_register_dsp_change {
__u32 changes[]; /* Encoded event for change of register DSP. */
};
+/**
+ * struct snd_firewire_event_ff400_message - the container for messages from a Fireface 400 when
+ * a hardware knob is operated.
+ *
+ * @type: Fixed to SNDRV_FIREWIRE_EVENT_FF400_MESSAGE.
+ * @message_count: The number of messages.
+ * @messages.message: The messages expressing hardware knob operations.
+ * @messages.tstamp: The isochronous cycle at which the request subaction of the asynchronous
+ * transaction delivering the message was sent. It is a 16-bit unsigned integer.
+ * The upper 3 bits carry the lower three bits of the second field in the
+ * CYCLE_TIME format (up to 7), and the remaining 13 bits carry the cycle field
+ * (up to 7999).
+ *
+ * The structure expresses the messages transmitted by a Fireface 400 when a hardware knob is operated.
+ */
+struct snd_firewire_event_ff400_message {
+ unsigned int type;
+ unsigned int message_count;
+ struct {
+ __u32 message;
+ __u32 tstamp;
+ } messages[];
+};
+
union snd_firewire_event {
struct snd_firewire_event_common common;
struct snd_firewire_event_lock_status lock_status;
@@ -81,6 +106,7 @@ union snd_firewire_event {
struct snd_firewire_event_tascam_control tascam_control;
struct snd_firewire_event_motu_notification motu_notification;
struct snd_firewire_event_motu_register_dsp_change motu_register_dsp_change;
+ struct snd_firewire_event_ff400_message ff400_message;
};
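
A short sketch of decoding the tstamp field exactly as the kernel-doc above describes; the function name is illustrative:

/* Split the 16-bit tstamp into its CYCLE_TIME parts (see snd_firewire_event_ff400_message). */
static void ff400_decode_tstamp(__u32 tstamp, unsigned int *sec_low3, unsigned int *cycle)
{
	*sec_low3 = (tstamp >> 13) & 0x7;	/* lower 3 bits of the second field, 0..7 */
	*cycle = tstamp & 0x1fff;		/* cycle field, 0..7999 */
}
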
diff --git a/include/uapi/sound/intel/avs/tokens.h b/include/uapi/sound/intel/avs/tokens.h
index 754f02b2f444..4ffe546aa409 100644
--- a/include/uapi/sound/intel/avs/tokens.h
+++ b/include/uapi/sound/intel/avs/tokens.h
@@ -108,6 +108,7 @@ enum avs_tplg_token {
AVS_TKN_MOD_CORE_ID_U8 = 1704,
AVS_TKN_MOD_PROC_DOMAIN_U8 = 1705,
AVS_TKN_MOD_MODCFG_EXT_ID_U32 = 1706,
+ AVS_TKN_MOD_KCONTROL_ID_U32 = 1707,
/* struct avs_tplg_path_template */
AVS_TKN_PATH_TMPL_ID_U32 = 1801,
@@ -121,6 +122,9 @@ enum avs_tplg_token {
AVS_TKN_PIN_FMT_INDEX_U32 = 2201,
AVS_TKN_PIN_FMT_IOBS_U32 = 2202,
AVS_TKN_PIN_FMT_AFMT_ID_U32 = 2203,
+
+ /* struct avs_tplg_kcontrol */
+ AVS_TKN_KCONTROL_ID_U32 = 2301,
};
#endif
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 1bba3fead2ce..4e8d6240e589 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -38,7 +38,6 @@
#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
-#define UFS_RPMB_UNIT 0xC4
/* WriteBooster buffer is available only for the logical unit from 0 to 7 */
#define UFS_UPIU_MAX_WB_LUN_ID 8
@@ -49,6 +48,11 @@
*/
#define UFS_WB_EXCEED_LIFETIME 0x0B
+/*
+ * Per the UFS specification, the Extra Header Segment (EHS) starts at byte 32 of the UPIU request/response packet.
+ */
+#define EHS_OFFSET_IN_RESPONSE 32
+
/* Well known logical unit id in LUN field of UPIU */
enum {
UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
@@ -165,6 +169,7 @@ enum attr_idn {
QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
+ QUERY_ATTR_IDN_EXT_IID_EN = 0x2A,
};
/* Descriptor idn for Query requests */
@@ -212,6 +217,28 @@ enum unit_desc_param {
UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
};
+/* RPMB Unit descriptor parameters offsets in bytes */
+enum rpmb_unit_desc_param {
+ RPMB_UNIT_DESC_PARAM_LEN = 0x0,
+ RPMB_UNIT_DESC_PARAM_TYPE = 0x1,
+ RPMB_UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+ RPMB_UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+ RPMB_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+ RPMB_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+ RPMB_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+ RPMB_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
+ RPMB_UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+ RPMB_UNIT_DESC_PARAM_REGION_EN = 0x9,
+ RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+ RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+ RPMB_UNIT_DESC_PARAM_REGION0_SIZE = 0x13,
+ RPMB_UNIT_DESC_PARAM_REGION1_SIZE = 0x14,
+ RPMB_UNIT_DESC_PARAM_REGION2_SIZE = 0x15,
+ RPMB_UNIT_DESC_PARAM_REGION3_SIZE = 0x16,
+ RPMB_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+ RPMB_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+};
+
/* Device descriptor parameters offsets in bytes*/
enum device_desc_param {
DEVICE_DESC_PARAM_LEN = 0x0,
@@ -352,6 +379,7 @@ enum {
UFS_DEV_EXT_TEMP_NOTIF = BIT(6),
UFS_DEV_HPB_SUPPORT = BIT(7),
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
+ UFS_DEV_EXT_IID_SUP = BIT(16),
};
#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
@@ -589,6 +617,8 @@ struct ufs_dev_info {
u8 *model;
u16 wspecversion;
u32 clk_gating_wait_us;
+ /* Stores the queue depth of the UFS device */
+ u8 bqueuedepth;
/* UFS HPB related flag */
bool hpb_enabled;
@@ -601,6 +631,11 @@ struct ufs_dev_info {
bool b_rpm_dev_flush_capable;
u8 b_presrv_uspc_en;
+
+ bool b_advanced_rpmb_en;
+
+ /* UFS EXT_IID Enable */
+ bool b_ext_iid_en;
};
/*
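
As a small illustration of the new EHS_OFFSET_IN_RESPONSE constant, a driver-side sketch for locating the EHS inside a raw response UPIU buffer (struct ufs_ehs is the layout added to scsi_bsg_ufs.h above; the buffer name and any length checking are assumptions):

/* Sketch: the EHS, when present, starts 32 bytes into the response UPIU. */
static const struct ufs_ehs *response_ehs(const void *upiu_rsp_buf)
{
	return (const struct ufs_ehs *)((const u8 *)upiu_rsp_buf + EHS_OFFSET_IN_RESPONSE);
}
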
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 727084cd79be..431c3afb2ce0 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -16,7 +16,9 @@
#include <linux/blk-crypto-profile.h>
#include <linux/blk-mq.h>
#include <linux/devfreq.h>
+#include <linux/msi.h>
#include <linux/pm_runtime.h>
+#include <linux/dma-direction.h>
#include <scsi/scsi_device.h>
#include <ufs/unipro.h>
#include <ufs/ufs.h>
@@ -30,6 +32,7 @@ struct ufs_hba;
enum dev_cmd_type {
DEV_CMD_TYPE_NOP = 0x0,
DEV_CMD_TYPE_QUERY = 0x1,
+ DEV_CMD_TYPE_RPMB = 0x2,
};
enum ufs_event_type {
@@ -222,6 +225,7 @@ struct ufs_dev_cmd {
struct mutex lock;
struct completion *complete;
struct ufs_query query;
+ struct cq_entry *cqe;
};
/**
@@ -297,6 +301,12 @@ struct ufs_pwr_mode_info {
* @config_scaling_param: called to configure clock scaling parameters
* @program_key: program or evict an inline encryption key
* @event_notify: called to notify important events
+ * @reinit_notify: called to notify that UFSHCD is being reinitialized during the max gear switch
+ * @mcq_config_resource: called to configure MCQ platform resources
+ * @get_hba_mac: called to get the vendor-specific MAC (max active commands) value, mandatory for MCQ mode
+ * @op_runtime_config: called to configure operation and runtime register pointers
+ * @get_outstanding_cqs: called to get the outstanding completion queues
+ * @config_esi: called to configure Event Specific Interrupts (ESI)
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -335,6 +345,13 @@ struct ufs_hba_variant_ops {
const union ufs_crypto_cfg_entry *cfg, int slot);
void (*event_notify)(struct ufs_hba *hba,
enum ufs_event_type evt, void *data);
+ void (*reinit_notify)(struct ufs_hba *hba);
+ int (*mcq_config_resource)(struct ufs_hba *hba);
+ int (*get_hba_mac)(struct ufs_hba *hba);
+ int (*op_runtime_config)(struct ufs_hba *hba);
+ int (*get_outstanding_cqs)(struct ufs_hba *hba,
+ unsigned long *ocqs);
+ int (*config_esi)(struct ufs_hba *hba);
};
/* clock gating state */
@@ -566,9 +583,9 @@ enum ufshcd_quirks {
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
/*
- * This quirk allows only sg entries aligned with page size.
+ * Align DMA SG entries on a 4 KiB boundary.
*/
- UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
+ UFSHCD_QUIRK_4KB_DMA_ALIGNMENT = 1 << 14,
/*
* This quirk needs to be enabled if the host controller does not
@@ -593,6 +610,12 @@ enum ufshcd_quirks {
* auto-hibernate capability but it's FASTAUTO only.
*/
UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18,
+
+ /*
+ * This quirk needs to be enabled if the host controller needs
+ * to reinit the device after switching to maximum gear.
+ */
+ UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH = 1 << 19,
};
enum ufshcd_caps {
@@ -724,6 +747,51 @@ struct ufs_hba_monitor {
};
/**
+ * struct ufshcd_res_info - MCQ related resource regions
+ *
+ * @name: resource name
+ * @resource: pointer to resource region
+ * @base: register base address
+ */
+struct ufshcd_res_info {
+ const char *name;
+ struct resource *resource;
+ void __iomem *base;
+};
+
+enum ufshcd_res {
+ RES_UFS,
+ RES_MCQ,
+ RES_MCQ_SQD,
+ RES_MCQ_SQIS,
+ RES_MCQ_CQD,
+ RES_MCQ_CQIS,
+ RES_MCQ_VS,
+ RES_MAX,
+};
+
+/**
+ * struct ufshcd_mcq_opr_info_t - Operation and Runtime registers
+ *
+ * @offset: Doorbell Address Offset
+ * @stride: register address stride between consecutive queues [0...31]
+ * @base: base address
+ */
+struct ufshcd_mcq_opr_info_t {
+ unsigned long offset;
+ unsigned long stride;
+ void __iomem *base;
+};
+
+enum ufshcd_mcq_opr {
+ OPR_SQD,
+ OPR_SQIS,
+ OPR_CQD,
+ OPR_CQIS,
+ OPR_MAX,
+};
+
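
To make the base/offset/stride bookkeeping above concrete, here is a sketch of deriving a per-queue register address from the mcq_opr[] table added to struct ufs_hba further below; it assumes @base already points at queue 0 of the register group:

/* Sketch: operation/runtime register base for hardware queue @qid. */
static inline void __iomem *mcq_opr_base(struct ufs_hba *hba, enum ufshcd_mcq_opr opr, int qid)
{
	struct ufshcd_mcq_opr_info_t *p = &hba->mcq_opr[opr];

	return p->base + p->stride * qid;
}
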
+/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
* @ucdl_base_addr: UFS Command Descriptor base address
@@ -747,6 +815,7 @@ struct ufs_hba_monitor {
* @outstanding_lock: Protects @outstanding_reqs.
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
+ * @mcq_capabilities: UFS Multi Circular Queue capabilities
* @nutrs: Transfer Request Queue depth supported by controller
* @nutmrs: Task Management Queue depth supported by controller
* @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
@@ -754,6 +823,7 @@ struct ufs_hba_monitor {
* @vops: pointer to variant specific operations
* @vps: pointer to variant specific parameters
* @priv: pointer to variant specific private data
+ * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
* @irq: Irq number of the controller
* @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
* @dev_ref_clk_freq: reference clock frequency
@@ -829,8 +899,17 @@ struct ufs_hba_monitor {
* ee_ctrl_mask
* @luns_avail: number of regular and well known LUNs supported by the UFS
* device
+ * @nr_hw_queues: number of hardware queues configured
+ * @nr_queues: number of queues configured per queue type
* @complete_put: whether or not to call ufshcd_rpm_put() from inside
* ufshcd_resume_complete()
+ * @ext_iid_sup: is EXT_IID supported by the UFSHC
+ * @mcq_sup: is MCQ supported by the UFSHC
+ * @mcq_enabled: is MCQ ready to accept requests
+ * @res: array of resource info of MCQ registers
+ * @mcq_base: Multi circular queue registers base address
+ * @uhq: array of supported hardware queues
+ * @dev_cmd_queue: Queue for issuing device management commands
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -872,12 +951,16 @@ struct ufs_hba {
u32 capabilities;
int nutrs;
+ u32 mcq_capabilities;
int nutmrs;
u32 reserved_slot;
u32 ufs_version;
const struct ufs_hba_variant_ops *vops;
struct ufs_hba_variant_params *vps;
void *priv;
+#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+ size_t sg_entry_size;
+#endif
unsigned int irq;
bool is_irq_enabled;
enum ufs_ref_clk_freq dev_ref_clk_freq;
@@ -954,7 +1037,6 @@ struct ufs_hba {
struct mutex wb_mutex;
struct rw_semaphore clk_scaling_lock;
- unsigned char desc_size[QUERY_DESC_IDN_MAX];
atomic_t scsi_block_reqs_cnt;
struct device bsg_dev;
@@ -979,9 +1061,88 @@ struct ufs_hba {
u32 debugfs_ee_rate_limit_ms;
#endif
u32 luns_avail;
+ unsigned int nr_hw_queues;
+ unsigned int nr_queues[HCTX_MAX_TYPES];
bool complete_put;
+ bool ext_iid_sup;
+ bool scsi_host_added;
+ bool mcq_sup;
+ bool mcq_enabled;
+ struct ufshcd_res_info res[RES_MAX];
+ void __iomem *mcq_base;
+ struct ufs_hw_queue *uhq;
+ struct ufs_hw_queue *dev_cmd_queue;
+ struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];
+};
+
+/**
+ * struct ufs_hw_queue - per hardware queue structure
+ * @mcq_sq_head: base address of submission queue head pointer
+ * @mcq_sq_tail: base address of submission queue tail pointer
+ * @mcq_cq_head: base address of completion queue head pointer
+ * @mcq_cq_tail: base address of completion queue tail pointer
+ * @sqe_base_addr: submission queue entry base address
+ * @sqe_dma_addr: submission queue dma address
+ * @cqe_base_addr: completion queue base address
+ * @cqe_dma_addr: completion queue dma address
+ * @max_entries: max number of slots in this hardware queue
+ * @id: hardware queue ID
+ * @sq_tail_slot: current slot to which the SQ tail pointer is pointing
+ * @sq_lock: serialize submission queue access
+ * @cq_tail_slot: current slot to which CQ tail pointer is pointing
+ * @cq_head_slot: current slot to which CQ head pointer is pointing
+ * @cq_lock: Synchronize between multiple polling instances
+ */
+struct ufs_hw_queue {
+ void __iomem *mcq_sq_head;
+ void __iomem *mcq_sq_tail;
+ void __iomem *mcq_cq_head;
+ void __iomem *mcq_cq_tail;
+
+ void *sqe_base_addr;
+ dma_addr_t sqe_dma_addr;
+ struct cq_entry *cqe_base_addr;
+ dma_addr_t cqe_dma_addr;
+ u32 max_entries;
+ u32 id;
+ u32 sq_tail_slot;
+ spinlock_t sq_lock;
+ u32 cq_tail_slot;
+ u32 cq_head_slot;
+ spinlock_t cq_lock;
};
+static inline bool is_mcq_enabled(struct ufs_hba *hba)
+{
+ return hba->mcq_enabled;
+}
+
+#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
+{
+ return hba->sg_entry_size;
+}
+
+static inline void ufshcd_set_sg_entry_size(struct ufs_hba *hba, size_t sg_entry_size)
+{
+ WARN_ON_ONCE(sg_entry_size < sizeof(struct ufshcd_sg_entry));
+ hba->sg_entry_size = sg_entry_size;
+}
+#else
+static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
+{
+ return sizeof(struct ufshcd_sg_entry);
+}
+
+#define ufshcd_set_sg_entry_size(hba, sg_entry_size) \
+ ({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
+#endif
+
+static inline size_t sizeof_utp_transfer_cmd_desc(const struct ufs_hba *hba)
+{
+ return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
+}
+
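
A brief sketch of how a variant driver could use ufshcd_set_sg_entry_size() when its controller appends extra words to every PRDT entry; the variant struct and init hook are hypothetical, and CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE must be enabled for a size other than the default to build:

/* Hypothetical variant PRDT entry: the standard entry followed by two extra words. */
struct my_variant_sg_entry {
	struct ufshcd_sg_entry std;
	__le32 extra[2];
};

static int my_variant_init(struct ufs_hba *hba)
{
	ufshcd_set_sg_entry_size(hba, sizeof(struct my_variant_sg_entry));
	return 0;
}
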
/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
@@ -1037,6 +1198,16 @@ static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING;
}
+#define ufsmcq_writel(hba, val, reg) \
+ writel((val), (hba)->mcq_base + (reg))
+#define ufsmcq_readl(hba, reg) \
+ readl((hba)->mcq_base + (reg))
+
+#define ufsmcq_writelx(hba, val, reg) \
+ writel_relaxed((val), (hba)->mcq_base + (reg))
+#define ufsmcq_readlx(hba, reg) \
+ readl_relaxed((hba)->mcq_base + (reg))
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -1073,6 +1244,11 @@ void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
+void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
/**
* ufshcd_set_variant - set variant specific data to the hba
@@ -1102,8 +1278,12 @@ extern int ufshcd_runtime_resume(struct device *dev);
#ifdef CONFIG_PM_SLEEP
extern int ufshcd_system_suspend(struct device *dev);
extern int ufshcd_system_resume(struct device *dev);
+extern int ufshcd_system_freeze(struct device *dev);
+extern int ufshcd_system_thaw(struct device *dev);
+extern int ufshcd_system_restore(struct device *dev);
#endif
extern int ufshcd_shutdown(struct ufs_hba *hba);
+
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
int adapt_val);
@@ -1188,9 +1368,6 @@ void ufshcd_release(struct ufs_hba *hba);
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_length);
-
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
@@ -1203,7 +1380,10 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
int msgcode,
u8 *desc_buff, int *buff_len,
enum query_opcode desc_op);
-
+int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req,
+ struct ufs_ehs *ehs_rsp, int sg_cnt,
+ struct scatterlist *sg_list, enum dma_data_direction dir);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
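
For orientation, a sketch of how the MCQ helpers declared above might be combined in a per-queue completion path; interrupt wiring and locking are intentionally omitted, the function name is illustrative, and UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS comes from the ufshci.h changes below:

/* Sketch: acknowledge a CQ "tail entry pushed" event and reap new completions. */
static void my_handle_mcq_cq_event(struct ufs_hba *hba, struct ufs_hw_queue *hwq, int qid)
{
	ufshcd_mcq_write_cqis(hba, UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS, qid);
	ufshcd_mcq_poll_cqe_nolock(hba, hwq);	/* assumed: caller holds hwq->cq_lock */
}
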
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index f525566a0864..11424bb03814 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -22,6 +22,7 @@ enum {
/* UFSHCI Registers */
enum {
REG_CONTROLLER_CAPABILITIES = 0x00,
+ REG_MCQCAP = 0x04,
REG_UFS_VERSION = 0x08,
REG_CONTROLLER_DEV_ID = 0x10,
REG_CONTROLLER_PROD_ID = 0x14,
@@ -56,6 +57,10 @@ enum {
REG_UFS_CCAP = 0x100,
REG_UFS_CRYPTOCAP = 0x104,
+ REG_UFS_MEM_CFG = 0x300,
+ REG_UFS_MCQ_CFG = 0x380,
+ REG_UFS_ESILBA = 0x384,
+ REG_UFS_ESIUBA = 0x388,
UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
};
@@ -63,11 +68,47 @@ enum {
enum {
MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
+ MASK_EHSLUTRD_SUPPORTED = 0x00400000,
MASK_AUTO_HIBERN8_SUPPORT = 0x00800000,
MASK_64_ADDRESSING_SUPPORT = 0x01000000,
MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
MASK_CRYPTO_SUPPORT = 0x10000000,
+ MASK_MCQ_SUPPORT = 0x40000000,
+};
+
+/* MCQ capability mask */
+enum {
+ MASK_EXT_IID_SUPPORT = 0x00000400,
+};
+
+enum {
+ REG_SQATTR = 0x0,
+ REG_SQLBA = 0x4,
+ REG_SQUBA = 0x8,
+ REG_SQDAO = 0xC,
+ REG_SQISAO = 0x10,
+
+ REG_CQATTR = 0x20,
+ REG_CQLBA = 0x24,
+ REG_CQUBA = 0x28,
+ REG_CQDAO = 0x2C,
+ REG_CQISAO = 0x30,
+};
+
+enum {
+ REG_SQHP = 0x0,
+ REG_SQTP = 0x4,
+};
+
+enum {
+ REG_CQHP = 0x0,
+ REG_CQTP = 0x4,
+};
+
+enum {
+ REG_CQIS = 0x0,
+ REG_CQIE = 0x4,
};
#define UFS_MASK(mask, offset) ((mask) << (offset))
@@ -126,6 +167,7 @@ static inline u32 ufshci_version(u32 major, u32 minor)
#define CONTROLLER_FATAL_ERROR 0x10000
#define SYSTEM_BUS_FATAL_ERROR 0x20000
#define CRYPTO_ENGINE_FATAL_ERROR 0x40000
+#define MCQ_CQ_EVENT_STATUS 0x100000
#define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\
UIC_HIBERNATE_EXIT)
@@ -223,6 +265,9 @@ enum {
/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
+/* CQISy - CQ y Interrupt Status Register */
+#define UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS 0x1
+
/* UICCMD - UIC Command */
#define COMMAND_OPCODE_MASK 0xFF
#define GEN_SELECTOR_INDEX_MASK 0xFFFF
@@ -422,18 +467,23 @@ struct ufshcd_sg_entry {
__le64 addr;
__le32 reserved;
__le32 size;
+ /*
+ * followed by variant-specific fields if
+ * CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE has been defined.
+ */
};
/**
* struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
* @command_upiu: Command UPIU Frame address
* @response_upiu: Response UPIU Frame address
- * @prd_table: Physical Region Descriptor
+ * @prd_table: Physical Region Descriptor: an array of SG_ALL struct
+ * ufshcd_sg_entry's. Variant-specific fields may be present after each.
*/
struct utp_transfer_cmd_desc {
u8 command_upiu[ALIGNED_UPIU_SIZE];
u8 response_upiu[ALIGNED_UPIU_SIZE];
- struct ufshcd_sg_entry prd_table[SG_ALL];
+ u8 prd_table[];
};
/**
@@ -478,6 +528,28 @@ struct utp_transfer_req_desc {
__le16 prd_table_offset;
};
+/* MCQ Completion Queue Entry */
+struct cq_entry {
+ /* DW 0-1 */
+ __le64 command_desc_base_addr;
+
+ /* DW 2 */
+ __le16 response_upiu_length;
+ __le16 response_upiu_offset;
+
+ /* DW 3 */
+ __le16 prd_table_length;
+ __le16 prd_table_offset;
+
+ /* DW 4 */
+ __le32 status;
+
+ /* DW 5-7 */
+ __le32 reserved[3];
+};
+
+static_assert(sizeof(struct cq_entry) == 32);
+
/*
* UTMRD structure.
*/
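
Because prd_table is now a plain byte array whose effective entry stride is ufshcd_sg_entry_size() (see the ufshcd.h changes above), addressing the i-th PRDT entry takes explicit pointer arithmetic; a minimal sketch, with the helper name assumed:

/* Sketch: locate entry @i in a variable-stride PRDT. */
static inline struct ufshcd_sg_entry *prdt_entry(struct ufs_hba *hba,
						 struct utp_transfer_cmd_desc *ucd, int i)
{
	return (struct ufshcd_sg_entry *)(ucd->prd_table + i * ufshcd_sg_entry_size(hba));
}
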
diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
index 6c553f98fe57..dc9dd1d23f0f 100644
--- a/include/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -141,7 +141,6 @@
#define PA_SAVECONFIGTIME 0x15A4
#define PA_RXHSUNTERMCAP 0x15A5
#define PA_RXLSTERMCAP 0x15A6
-#define PA_GRANULARITY 0x15AA
#define PA_HIBERN8TIME 0x15A7
#define PA_LOCALVERINFO 0x15A9
#define PA_GRANULARITY 0x15AA
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index ad4fb4eab753..ac22cf08c09f 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -96,10 +96,7 @@ struct xenbus_device {
unsigned int spurious_threshold;
};
-static inline struct xenbus_device *to_xenbus_device(struct device *dev)
-{
- return container_of(dev, struct xenbus_device, dev);
-}
+#define to_xenbus_device(__dev) container_of_const(__dev, struct xenbus_device, dev)
struct xenbus_device_id
{
@@ -120,7 +117,7 @@ struct xenbus_driver {
void (*remove)(struct xenbus_device *dev);
int (*suspend)(struct xenbus_device *dev);
int (*resume)(struct xenbus_device *dev);
- int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
+ int (*uevent)(const struct xenbus_device *, struct kobj_uevent_env *);
struct device_driver driver;
int (*read_otherend_details)(struct xenbus_device *dev);
int (*is_ready)(struct xenbus_device *dev);