-rw-r--r--Documentation/admin-guide/perf/alibaba_pmu.rst5
-rw-r--r--Documentation/admin-guide/sysctl/kernel.rst29
-rw-r--r--Documentation/dev-tools/kasan.rst4
-rw-r--r--Documentation/devicetree/bindings/cache/andestech,ax45mp-cache.yaml81
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,easrc.yaml8
-rw-r--r--Documentation/features/debug/KASAN/arch-support.txt2
-rw-r--r--Documentation/features/debug/kcov/arch-support.txt2
-rw-r--r--Documentation/features/debug/kgdb/arch-support.txt2
-rw-r--r--Documentation/filesystems/btrfs.rst1
-rw-r--r--Documentation/gpu/amdgpu/driver-misc.rst8
-rw-r--r--Documentation/gpu/automated_testing.rst144
-rw-r--r--Documentation/gpu/index.rst1
-rw-r--r--Documentation/riscv/hwprobe.rst11
-rw-r--r--Documentation/translations/zh_CN/dev-tools/kasan.rst2
-rw-r--r--MAINTAINERS42
-rw-r--r--Makefile4
-rw-r--r--arch/arm64/include/asm/efi.h2
-rw-r--r--arch/arm64/kernel/idreg-override.c6
-rw-r--r--arch/arm64/lib/csum.c2
-rw-r--r--arch/loongarch/Kconfig26
-rw-r--r--arch/loongarch/Makefile3
-rw-r--r--arch/loongarch/configs/loongson3_defconfig74
-rw-r--r--arch/loongarch/include/asm/asm-prototypes.h1
-rw-r--r--arch/loongarch/include/asm/asmmacro.h158
-rw-r--r--arch/loongarch/include/asm/kasan.h126
-rw-r--r--arch/loongarch/include/asm/kfence.h61
-rw-r--r--arch/loongarch/include/asm/kgdb.h97
-rw-r--r--arch/loongarch/include/asm/lbt.h109
-rw-r--r--arch/loongarch/include/asm/loongarch.h47
-rw-r--r--arch/loongarch/include/asm/mmzone.h2
-rw-r--r--arch/loongarch/include/asm/page.h7
-rw-r--r--arch/loongarch/include/asm/pgalloc.h1
-rw-r--r--arch/loongarch/include/asm/pgtable.h31
-rw-r--r--arch/loongarch/include/asm/processor.h26
-rw-r--r--arch/loongarch/include/asm/setup.h8
-rw-r--r--arch/loongarch/include/asm/stackframe.h4
-rw-r--r--arch/loongarch/include/asm/string.h20
-rw-r--r--arch/loongarch/include/asm/switch_to.h2
-rw-r--r--arch/loongarch/include/asm/thread_info.h4
-rw-r--r--arch/loongarch/include/asm/xor.h68
-rw-r--r--arch/loongarch/include/asm/xor_simd.h34
-rw-r--r--arch/loongarch/include/uapi/asm/ptrace.h6
-rw-r--r--arch/loongarch/include/uapi/asm/sigcontext.h10
-rw-r--r--arch/loongarch/kernel/Makefile9
-rw-r--r--arch/loongarch/kernel/asm-offsets.c18
-rw-r--r--arch/loongarch/kernel/cpu-probe.c14
-rw-r--r--arch/loongarch/kernel/entry.S5
-rw-r--r--arch/loongarch/kernel/fpu.S14
-rw-r--r--arch/loongarch/kernel/head.S13
-rw-r--r--arch/loongarch/kernel/kfpu.c55
-rw-r--r--arch/loongarch/kernel/kgdb.c727
-rw-r--r--arch/loongarch/kernel/lbt.S155
-rw-r--r--arch/loongarch/kernel/numa.c35
-rw-r--r--arch/loongarch/kernel/process.c15
-rw-r--r--arch/loongarch/kernel/ptrace.c54
-rw-r--r--arch/loongarch/kernel/relocate.c8
-rw-r--r--arch/loongarch/kernel/setup.c4
-rw-r--r--arch/loongarch/kernel/signal.c188
-rw-r--r--arch/loongarch/kernel/stacktrace.c18
-rw-r--r--arch/loongarch/kernel/traps.c50
-rw-r--r--arch/loongarch/lib/Makefile2
-rw-r--r--arch/loongarch/lib/clear_user.S87
-rw-r--r--arch/loongarch/lib/copy_user.S161
-rw-r--r--arch/loongarch/lib/memcpy.S8
-rw-r--r--arch/loongarch/lib/memmove.S20
-rw-r--r--arch/loongarch/lib/memset.S8
-rw-r--r--arch/loongarch/lib/xor_simd.c93
-rw-r--r--arch/loongarch/lib/xor_simd.h38
-rw-r--r--arch/loongarch/lib/xor_simd_glue.c72
-rw-r--r--arch/loongarch/lib/xor_template.c110
-rw-r--r--arch/loongarch/mm/Makefile3
-rw-r--r--arch/loongarch/mm/cache.c1
-rw-r--r--arch/loongarch/mm/fault.c22
-rw-r--r--arch/loongarch/mm/init.c71
-rw-r--r--arch/loongarch/mm/kasan_init.c243
-rw-r--r--arch/loongarch/mm/mmap.c13
-rw-r--r--arch/loongarch/mm/pgtable.c12
-rw-r--r--arch/loongarch/vdso/Makefile3
-rw-r--r--arch/parisc/include/asm/cache.h1
-rw-r--r--arch/parisc/include/asm/mckinley.h8
-rw-r--r--arch/parisc/include/asm/pdc.h5
-rw-r--r--arch/parisc/include/asm/processor.h1
-rw-r--r--arch/parisc/include/asm/ropes.h7
-rw-r--r--arch/parisc/include/asm/shmparam.h15
-rw-r--r--arch/parisc/kernel/asm-offsets.c2
-rw-r--r--arch/parisc/kernel/cache.c8
-rw-r--r--arch/parisc/kernel/drivers.c2
-rw-r--r--arch/parisc/kernel/firmware.c56
-rw-r--r--arch/parisc/kernel/head.S16
-rw-r--r--arch/parisc/kernel/irq.c2
-rw-r--r--arch/parisc/kernel/processor.c2
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S1
-rw-r--r--arch/parisc/mm/init.c72
-rw-r--r--arch/riscv/Kconfig28
-rw-r--r--arch/riscv/Kconfig.errata21
-rw-r--r--arch/riscv/errata/Makefile1
-rw-r--r--arch/riscv/errata/andes/Makefile1
-rw-r--r--arch/riscv/errata/andes/errata.c66
-rw-r--r--arch/riscv/errata/thead/errata.c8
-rw-r--r--arch/riscv/include/asm/alternative.h8
-rw-r--r--arch/riscv/include/asm/cpufeature.h2
-rw-r--r--arch/riscv/include/asm/dma-noncoherent.h28
-rw-r--r--arch/riscv/include/asm/efi.h2
-rw-r--r--arch/riscv/include/asm/errata_list.h5
-rw-r--r--arch/riscv/include/asm/page.h3
-rw-r--r--arch/riscv/include/asm/patch.h1
-rw-r--r--arch/riscv/include/asm/vendorid_list.h1
-rw-r--r--arch/riscv/include/uapi/asm/ptrace.h13
-rw-r--r--arch/riscv/kernel/Makefile1
-rw-r--r--arch/riscv/kernel/alternative.c24
-rw-r--r--arch/riscv/kernel/copy-unaligned.S71
-rw-r--r--arch/riscv/kernel/copy-unaligned.h13
-rw-r--r--arch/riscv/kernel/cpufeature.c104
-rw-r--r--arch/riscv/kernel/image-vars.h1
-rw-r--r--arch/riscv/kernel/patch.c114
-rw-r--r--arch/riscv/kernel/pi/Makefile2
-rw-r--r--arch/riscv/kernel/pi/cmdline_early.c13
-rw-r--r--arch/riscv/kernel/pi/fdt_early.c30
-rw-r--r--arch/riscv/kernel/ptrace.c79
-rw-r--r--arch/riscv/kernel/setup.c25
-rw-r--r--arch/riscv/kernel/smpboot.c3
-rw-r--r--arch/riscv/mm/dma-noncoherent.c103
-rw-r--r--arch/riscv/mm/init.c36
-rw-r--r--arch/riscv/mm/pmem.c13
-rw-r--r--arch/riscv/net/bpf_jit.h3
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c60
-rw-r--r--arch/riscv/net/bpf_jit_core.c106
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c2
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c6
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c2
-rw-r--r--arch/sh/boards/mach-migor/setup.c2
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c6
-rw-r--r--arch/sh/drivers/push-switch.c2
-rw-r--r--arch/x86/events/intel/uncore_snbep.c12
-rw-r--r--arch/x86/include/asm/mman.h15
-rw-r--r--arch/x86/include/uapi/asm/mman.h8
-rw-r--r--arch/x86/kernel/cpu/sgx/virt.c3
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/vmlinux.lds.S2
-rw-r--r--block/blk-map.c7
-rw-r--r--block/blk-throttle.c112
-rw-r--r--block/blk-throttle.h4
-rw-r--r--block/fops.c4
-rw-r--r--block/ioctl.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.c3
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/null_blk/main.c12
-rw-r--r--drivers/cache/Kconfig11
-rw-r--r--drivers/cache/Makefile3
-rw-r--r--drivers/cache/ax45mp_cache.c213
-rw-r--r--drivers/char/agp/parisc-agp.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c2
-rw-r--r--drivers/firmware/efi/libstub/Makefile4
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c117
-rw-r--r--drivers/firmware/efi/libstub/efistub.h8
-rw-r--r--drivers/firmware/efi/libstub/kaslr.c159
-rw-r--r--drivers/firmware/efi/libstub/riscv-stub.c33
-rw-r--r--drivers/gpio/gpio-zynq.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c24
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c21
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c9
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h18
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h38
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h16
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c54
-rw-r--r--drivers/gpu/drm/ci/arm.config69
-rw-r--r--drivers/gpu/drm/ci/arm64.config199
-rw-r--r--drivers/gpu/drm/ci/build-igt.sh35
-rw-r--r--drivers/gpu/drm/ci/build.sh157
-rw-r--r--drivers/gpu/drm/ci/build.yml110
-rwxr-xr-xdrivers/gpu/drm/ci/check-patch.py57
-rw-r--r--drivers/gpu/drm/ci/container.yml65
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml251
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh77
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml15
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh57
-rw-r--r--drivers/gpu/drm/ci/static-checks.yml12
-rw-r--r--drivers/gpu/drm/ci/test.yml335
-rw-r--r--drivers/gpu/drm/ci/testlist.txt2912
-rw-r--r--drivers/gpu/drm/ci/x86_64.config111
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt19
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt17
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt58
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt38
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt19
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt41
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt37
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-flakes.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt11
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt48
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt29
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt0
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt12
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-flakes.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt15
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-flakes.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt68
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt11
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt48
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt9
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt52
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt37
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt38
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-flakes.txt0
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c3
-rw-r--r--drivers/gpu/drm/i915/i915_request.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c5
-rw-r--r--drivers/media/dvb-frontends/ascot2e.c2
-rw-r--r--drivers/media/dvb-frontends/atbm8830.c2
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c2
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c2
-rw-r--r--drivers/media/dvb-frontends/cx22700.c2
-rw-r--r--drivers/media/dvb-frontends/cx22702.c2
-rw-r--r--drivers/media/dvb-frontends/cx24110.c2
-rw-r--r--drivers/media/dvb-frontends/cx24113.c2
-rw-r--r--drivers/media/dvb-frontends/cx24116.c2
-rw-r--r--drivers/media/dvb-frontends/cx24120.c2
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2880/cxd2880_top.c2
-rw-r--r--drivers/media/dvb-frontends/dib0070.c2
-rw-r--r--drivers/media/dvb-frontends/dib0090.c4
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c2
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000m.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c2
-rw-r--r--drivers/media/dvb-frontends/dib8000.c2
-rw-r--r--drivers/media/dvb-frontends/dib9000.c2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c2
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c2
-rw-r--r--drivers/media/dvb-frontends/ds3000.c2
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c2
-rw-r--r--drivers/media/dvb-frontends/ec100.c2
-rw-r--r--drivers/media/dvb-frontends/helene.c4
-rw-r--r--drivers/media/dvb-frontends/horus3a.c2
-rw-r--r--drivers/media/dvb-frontends/isl6405.c2
-rw-r--r--drivers/media/dvb-frontends/isl6421.c2
-rw-r--r--drivers/media/dvb-frontends/isl6423.c2
-rw-r--r--drivers/media/dvb-frontends/itd1000.c2
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c2
-rw-r--r--drivers/media/dvb-frontends/l64781.c2
-rw-r--r--drivers/media/dvb-frontends/lg2160.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c2
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.c2
-rw-r--r--drivers/media/dvb-frontends/lnbh25.c2
-rw-r--r--drivers/media/dvb-frontends/lnbp21.c4
-rw-r--r--drivers/media/dvb-frontends/lnbp22.c2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c2
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c2
-rw-r--r--drivers/media/dvb-frontends/mt312.c2
-rw-r--r--drivers/media/dvb-frontends/mt352.c2
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c2
-rw-r--r--drivers/media/dvb-frontends/nxt6000.c2
-rw-r--r--drivers/media/dvb-frontends/or51132.c2
-rw-r--r--drivers/media/dvb-frontends/or51211.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1409.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1432.c2
-rw-r--r--drivers/media/dvb-frontends/s921.c2
-rw-r--r--drivers/media/dvb-frontends/si21xx.c2
-rw-r--r--drivers/media/dvb-frontends/sp887x.c2
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c2
-rw-r--r--drivers/media/dvb-frontends/stb6000.c2
-rw-r--r--drivers/media/dvb-frontends/stb6100.c2
-rw-r--r--drivers/media/dvb-frontends/stv0288.c2
-rw-r--r--drivers/media/dvb-frontends/stv0297.c2
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367.c6
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c2
-rw-r--r--drivers/media/dvb-frontends/stv090x.c2
-rw-r--r--drivers/media/dvb-frontends/stv6110.c2
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c2
-rw-r--r--drivers/media/dvb-frontends/tda10021.c2
-rw-r--r--drivers/media/dvb-frontends/tda10023.c2
-rw-r--r--drivers/media/dvb-frontends/tda10048.c2
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c4
-rw-r--r--drivers/media/dvb-frontends/tda10086.c2
-rw-r--r--drivers/media/dvb-frontends/tda665x.c2
-rw-r--r--drivers/media/dvb-frontends/tda8083.c2
-rw-r--r--drivers/media/dvb-frontends/tda8261.c2
-rw-r--r--drivers/media/dvb-frontends/tda826x.c2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c2
-rw-r--r--drivers/media/dvb-frontends/tua6100.c2
-rw-r--r--drivers/media/dvb-frontends/ves1820.c2
-rw-r--r--drivers/media/dvb-frontends/ves1x93.c2
-rw-r--r--drivers/media/dvb-frontends/zl10036.c2
-rw-r--r--drivers/media/dvb-frontends/zl10039.c2
-rw-r--r--drivers/media/dvb-frontends/zl10353.c2
-rw-r--r--drivers/media/pci/bt8xx/dst.c2
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-dummy-fe.c2
-rw-r--r--drivers/media/tuners/fc0011.c2
-rw-r--r--drivers/media/tuners/fc0012.c2
-rw-r--r--drivers/media/tuners/fc0013.c2
-rw-r--r--drivers/media/tuners/max2165.c2
-rw-r--r--drivers/media/tuners/mc44s803.c2
-rw-r--r--drivers/media/tuners/mt2060.c2
-rw-r--r--drivers/media/tuners/mt2131.c2
-rw-r--r--drivers/media/tuners/mt2266.c2
-rw-r--r--drivers/media/tuners/mxl5005s.c2
-rw-r--r--drivers/media/tuners/qt1010.c2
-rw-r--r--drivers/media/tuners/tda18218.c2
-rw-r--r--drivers/media/tuners/xc2028.c2
-rw-r--r--drivers/media/tuners/xc4000.c2
-rw-r--r--drivers/media/tuners/xc5000.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c28
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c20
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h2
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c11
-rw-r--r--drivers/ntb/ntb_transport.c21
-rw-r--r--drivers/ntb/test/ntb_perf.c2
-rw-r--r--drivers/ntb/test/ntb_tool.c15
-rw-r--r--drivers/parisc/ccio-dma.c18
-rw-r--r--drivers/parisc/iommu-helpers.h8
-rw-r--r--drivers/parisc/iosapic.c4
-rw-r--r--drivers/parisc/iosapic_private.h4
-rw-r--r--drivers/parisc/sba_iommu.c38
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/probe.c1
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/perf/cxl_pmu.c2
-rw-r--r--drivers/platform/mellanox/Kconfig5
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c41
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c90
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/pmdomain/Makefile (renamed from drivers/genpd/Makefile)0
-rw-r--r--drivers/pmdomain/actions/Makefile (renamed from drivers/genpd/actions/Makefile)0
-rw-r--r--drivers/pmdomain/actions/owl-sps-helper.c (renamed from drivers/genpd/actions/owl-sps-helper.c)0
-rw-r--r--drivers/pmdomain/actions/owl-sps.c (renamed from drivers/genpd/actions/owl-sps.c)0
-rw-r--r--drivers/pmdomain/amlogic/Makefile (renamed from drivers/genpd/amlogic/Makefile)0
-rw-r--r--drivers/pmdomain/amlogic/meson-ee-pwrc.c (renamed from drivers/genpd/amlogic/meson-ee-pwrc.c)0
-rw-r--r--drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c (renamed from drivers/genpd/amlogic/meson-gx-pwrc-vpu.c)0
-rw-r--r--drivers/pmdomain/amlogic/meson-secure-pwrc.c (renamed from drivers/genpd/amlogic/meson-secure-pwrc.c)0
-rw-r--r--drivers/pmdomain/apple/Makefile (renamed from drivers/genpd/apple/Makefile)0
-rw-r--r--drivers/pmdomain/apple/pmgr-pwrstate.c (renamed from drivers/genpd/apple/pmgr-pwrstate.c)0
-rw-r--r--drivers/pmdomain/bcm/Makefile (renamed from drivers/genpd/bcm/Makefile)0
-rw-r--r--drivers/pmdomain/bcm/bcm-pmb.c (renamed from drivers/genpd/bcm/bcm-pmb.c)0
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c (renamed from drivers/genpd/bcm/bcm2835-power.c)0
-rw-r--r--drivers/pmdomain/bcm/bcm63xx-power.c (renamed from drivers/genpd/bcm/bcm63xx-power.c)0
-rw-r--r--drivers/pmdomain/bcm/raspberrypi-power.c (renamed from drivers/genpd/bcm/raspberrypi-power.c)0
-rw-r--r--drivers/pmdomain/imx/Makefile (renamed from drivers/genpd/imx/Makefile)0
-rw-r--r--drivers/pmdomain/imx/gpc.c (renamed from drivers/genpd/imx/gpc.c)0
-rw-r--r--drivers/pmdomain/imx/gpcv2.c (renamed from drivers/genpd/imx/gpcv2.c)0
-rw-r--r--drivers/pmdomain/imx/imx8m-blk-ctrl.c (renamed from drivers/genpd/imx/imx8m-blk-ctrl.c)0
-rw-r--r--drivers/pmdomain/imx/imx8mp-blk-ctrl.c (renamed from drivers/genpd/imx/imx8mp-blk-ctrl.c)0
-rw-r--r--drivers/pmdomain/imx/imx93-blk-ctrl.c (renamed from drivers/genpd/imx/imx93-blk-ctrl.c)0
-rw-r--r--drivers/pmdomain/imx/imx93-pd.c (renamed from drivers/genpd/imx/imx93-pd.c)0
-rw-r--r--drivers/pmdomain/imx/scu-pd.c (renamed from drivers/genpd/imx/scu-pd.c)0
-rw-r--r--drivers/pmdomain/mediatek/Makefile (renamed from drivers/genpd/mediatek/Makefile)0
-rw-r--r--drivers/pmdomain/mediatek/mt6795-pm-domains.h (renamed from drivers/genpd/mediatek/mt6795-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8167-pm-domains.h (renamed from drivers/genpd/mediatek/mt8167-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8173-pm-domains.h (renamed from drivers/genpd/mediatek/mt8173-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8183-pm-domains.h (renamed from drivers/genpd/mediatek/mt8183-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8186-pm-domains.h (renamed from drivers/genpd/mediatek/mt8186-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8188-pm-domains.h (renamed from drivers/genpd/mediatek/mt8188-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8192-pm-domains.h (renamed from drivers/genpd/mediatek/mt8192-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8195-pm-domains.h (renamed from drivers/genpd/mediatek/mt8195-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c (renamed from drivers/genpd/mediatek/mtk-pm-domains.c)0
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.h (renamed from drivers/genpd/mediatek/mtk-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mtk-scpsys.c (renamed from drivers/genpd/mediatek/mtk-scpsys.c)0
-rw-r--r--drivers/pmdomain/qcom/Makefile (renamed from drivers/genpd/qcom/Makefile)0
-rw-r--r--drivers/pmdomain/qcom/cpr.c (renamed from drivers/genpd/qcom/cpr.c)0
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c (renamed from drivers/genpd/qcom/rpmhpd.c)0
-rw-r--r--drivers/pmdomain/qcom/rpmpd.c (renamed from drivers/genpd/qcom/rpmpd.c)0
-rw-r--r--drivers/pmdomain/renesas/Makefile (renamed from drivers/genpd/renesas/Makefile)0
-rw-r--r--drivers/pmdomain/renesas/r8a7742-sysc.c (renamed from drivers/genpd/renesas/r8a7742-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7743-sysc.c (renamed from drivers/genpd/renesas/r8a7743-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7745-sysc.c (renamed from drivers/genpd/renesas/r8a7745-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77470-sysc.c (renamed from drivers/genpd/renesas/r8a77470-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a774a1-sysc.c (renamed from drivers/genpd/renesas/r8a774a1-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a774b1-sysc.c (renamed from drivers/genpd/renesas/r8a774b1-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a774c0-sysc.c (renamed from drivers/genpd/renesas/r8a774c0-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a774e1-sysc.c (renamed from drivers/genpd/renesas/r8a774e1-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7779-sysc.c (renamed from drivers/genpd/renesas/r8a7779-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7790-sysc.c (renamed from drivers/genpd/renesas/r8a7790-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7791-sysc.c (renamed from drivers/genpd/renesas/r8a7791-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7792-sysc.c (renamed from drivers/genpd/renesas/r8a7792-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7794-sysc.c (renamed from drivers/genpd/renesas/r8a7794-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7795-sysc.c (renamed from drivers/genpd/renesas/r8a7795-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7796-sysc.c (renamed from drivers/genpd/renesas/r8a7796-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77965-sysc.c (renamed from drivers/genpd/renesas/r8a77965-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77970-sysc.c (renamed from drivers/genpd/renesas/r8a77970-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77980-sysc.c (renamed from drivers/genpd/renesas/r8a77980-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77990-sysc.c (renamed from drivers/genpd/renesas/r8a77990-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77995-sysc.c (renamed from drivers/genpd/renesas/r8a77995-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a779a0-sysc.c (renamed from drivers/genpd/renesas/r8a779a0-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a779f0-sysc.c (renamed from drivers/genpd/renesas/r8a779f0-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a779g0-sysc.c (renamed from drivers/genpd/renesas/r8a779g0-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.c (renamed from drivers/genpd/renesas/rcar-gen4-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.h (renamed from drivers/genpd/renesas/rcar-gen4-sysc.h)0
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c (renamed from drivers/genpd/renesas/rcar-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.h (renamed from drivers/genpd/renesas/rcar-sysc.h)0
-rw-r--r--drivers/pmdomain/renesas/rmobile-sysc.c (renamed from drivers/genpd/renesas/rmobile-sysc.c)0
-rw-r--r--drivers/pmdomain/rockchip/Makefile (renamed from drivers/genpd/rockchip/Makefile)0
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c (renamed from drivers/genpd/rockchip/pm-domains.c)0
-rw-r--r--drivers/pmdomain/samsung/Makefile (renamed from drivers/genpd/samsung/Makefile)0
-rw-r--r--drivers/pmdomain/samsung/exynos-pm-domains.c (renamed from drivers/genpd/samsung/exynos-pm-domains.c)0
-rw-r--r--drivers/pmdomain/st/Makefile (renamed from drivers/genpd/st/Makefile)0
-rw-r--r--drivers/pmdomain/st/ste-ux500-pm-domain.c (renamed from drivers/genpd/st/ste-ux500-pm-domain.c)0
-rw-r--r--drivers/pmdomain/starfive/Makefile (renamed from drivers/genpd/starfive/Makefile)0
-rw-r--r--drivers/pmdomain/starfive/jh71xx-pmu.c (renamed from drivers/genpd/starfive/jh71xx-pmu.c)0
-rw-r--r--drivers/pmdomain/sunxi/Makefile (renamed from drivers/genpd/sunxi/Makefile)0
-rw-r--r--drivers/pmdomain/sunxi/sun20i-ppu.c (renamed from drivers/genpd/sunxi/sun20i-ppu.c)0
-rw-r--r--drivers/pmdomain/tegra/Makefile (renamed from drivers/genpd/tegra/Makefile)0
-rw-r--r--drivers/pmdomain/tegra/powergate-bpmp.c (renamed from drivers/genpd/tegra/powergate-bpmp.c)0
-rw-r--r--drivers/pmdomain/ti/Makefile (renamed from drivers/genpd/ti/Makefile)0
-rw-r--r--drivers/pmdomain/ti/omap_prm.c (renamed from drivers/genpd/ti/omap_prm.c)0
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c (renamed from drivers/genpd/ti/ti_sci_pm_domains.c)0
-rw-r--r--drivers/pmdomain/xilinx/Makefile (renamed from drivers/genpd/xilinx/Makefile)0
-rw-r--r--drivers/pmdomain/xilinx/zynqmp-pm-domains.c (renamed from drivers/genpd/xilinx/zynqmp-pm-domains.c)0
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/powercap/intel_rapl_common.c4
-rw-r--r--drivers/s390/block/dasd_devmap.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c10
-rw-r--r--drivers/s390/block/dasd_int.h4
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/fnic/fnic.h3
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c53
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c41
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c50
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mvumi.c2
-rw-r--r--drivers/scsi/qedf/qedf.h1
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h57
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h57
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c321
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c164
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c402
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c75
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/scsi_debugfs.c26
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_scan.c4
-rw-r--r--drivers/scsi/scsi_sysfs.c4
-rw-r--r--drivers/scsi/sd.c66
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h16
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c256
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/storvsc_drv.c2
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/scsi/xen-scsifront.c6
-rw-r--r--drivers/soc/renesas/Kconfig5
-rw-r--r--drivers/staging/media/av7110/sp8870.c2
-rw-r--r--drivers/thermal/armada_thermal.c5
-rw-r--r--drivers/thermal/dove_thermal.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c6
-rw-r--r--drivers/thermal/kirkwood_thermal.c4
-rw-r--r--drivers/thermal/spear_thermal.c4
-rw-r--r--drivers/thermal/thermal_core.c16
-rw-r--r--drivers/ufs/core/ufs_bsg.c3
-rw-r--r--drivers/ufs/core/ufshcd.c14
-rw-r--r--fs/btrfs/Kconfig2
-rw-r--r--fs/btrfs/block-group.c12
-rw-r--r--fs/btrfs/delayed-inode.c104
-rw-r--r--fs/btrfs/disk-io.c22
-rw-r--r--fs/btrfs/ioctl.c8
-rw-r--r--fs/btrfs/locking.h2
-rw-r--r--fs/btrfs/ordered-data.c2
-rw-r--r--fs/btrfs/transaction.c39
-rw-r--r--fs/btrfs/transaction.h1
-rw-r--r--fs/nls/Kconfig7
-rw-r--r--fs/overlayfs/copy_up.c3
-rw-r--r--fs/overlayfs/file.c9
-rw-r--r--fs/smb/client/cached_dir.c11
-rw-r--r--fs/smb/client/cached_dir.h2
-rw-r--r--fs/smb/client/cifsfs.c12
-rw-r--r--fs/smb/client/cifsfs.h4
-rw-r--r--fs/smb/client/cifsglob.h2
-rw-r--r--fs/smb/client/connect.c1
-rw-r--r--fs/smb/client/fs_context.c11
-rw-r--r--fs/smb/client/fs_context.h4
-rw-r--r--fs/smb/client/fscache.c2
-rw-r--r--fs/smb/client/smb2ops.c1
-rw-r--r--fs/smb/client/trace.h2
-rw-r--r--fs/smb/common/smb2pdu.h2
-rw-r--r--fs/smb/server/Kconfig2
-rw-r--r--fs/smb/server/server.c2
-rw-r--r--fs/tracefs/event_inode.c59
-rw-r--r--fs/tracefs/inode.c5
-rw-r--r--fs/tracefs/internal.h5
-rw-r--r--include/linux/export-internal.h2
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/nvme-fc-driver.h6
-rw-r--r--include/linux/oid_registry.h1
-rw-r--r--include/linux/raid/pq.h4
-rw-r--r--include/linux/thermal.h51
-rw-r--r--include/linux/trace_events.h7
-rw-r--r--include/linux/xarray.h18
-rw-r--r--include/net/ipv6.h7
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/sound/dmaengine_pcm.h2
-rw-r--r--include/sound/soc-component.h4
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h2
-rw-r--r--io_uring/fdinfo.c2
-rw-r--r--io_uring/io-wq.c10
-rw-r--r--io_uring/io-wq.h1
-rw-r--r--io_uring/io_uring.c56
-rw-r--r--io_uring/sqpoll.c4
-rw-r--r--kernel/bpf/core.c8
-rw-r--r--kernel/dma/Kconfig2
-rw-r--r--kernel/dma/contiguous.c5
-rw-r--r--kernel/dma/debug.c20
-rw-r--r--kernel/dma/pool.c4
-rw-r--r--kernel/printk/printk.c2
-rw-r--r--kernel/trace/ring_buffer.c7
-rw-r--r--kernel/trace/trace.c72
-rw-r--r--kernel/trace/trace.h2
-rw-r--r--kernel/trace/trace_events.c32
-rw-r--r--kernel/trace/trace_events_inject.c3
-rw-r--r--kernel/trace/trace_events_synth.c2
-rw-r--r--lib/Kconfig.debug11
-rw-r--r--lib/Makefile1
-rw-r--r--lib/idr.c2
-rw-r--r--lib/iov_iter.c30
-rw-r--r--lib/kunit/executor.c48
-rw-r--r--lib/kunit/executor_test.c13
-rw-r--r--lib/kunit/test.c3
-rw-r--r--lib/kunit_iov_iter.c777
-rw-r--r--lib/raid6/Makefile1
-rw-r--r--lib/raid6/algos.c16
-rw-r--r--lib/raid6/loongarch.h38
-rw-r--r--lib/raid6/loongarch_simd.c422
-rw-r--r--lib/raid6/recov_loongarch_simd.c513
-rw-r--r--lib/raid6/test/Makefile12
-rw-r--r--lib/xarray.c8
-rw-r--r--mm/kasan/init.c18
-rw-r--r--mm/kasan/kasan.h6
-rw-r--r--mm/kfence/core.c5
-rw-r--r--net/ipv4/inet_hashtables.c36
-rw-r--r--net/kcm/kcmsock.c15
-rw-r--r--net/tls/tls_sw.c4
-rwxr-xr-xscripts/headers_install.sh1
-rw-r--r--scripts/mod/modpost.c9
-rw-r--r--security/landlock/ruleset.h2
-rw-r--r--sound/core/pcm_lib.c8
-rw-r--r--sound/core/seq/seq_memory.c9
-rw-r--r--sound/isa/sb/emu8000_pcm.c2
-rw-r--r--sound/pci/hda/patch_cs8409.c2
-rw-r--r--sound/pci/hda/patch_cs8409.h1
-rw-r--r--sound/pci/hda/patch_realtek.c30
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c16
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c14
-rw-r--r--sound/soc/atmel/mchp-pdmc.c2
-rw-r--r--sound/soc/codecs/Kconfig6
-rw-r--r--sound/soc/codecs/Makefile8
-rw-r--r--sound/soc/codecs/cs35l45.c11
-rw-r--r--sound/soc/codecs/cs35l56-shared.c29
-rw-r--r--sound/soc/codecs/cs42l43.c3
-rw-r--r--sound/soc/codecs/rt5645.c16
-rw-r--r--sound/soc/codecs/wcd-clsh-v2.c8
-rw-r--r--sound/soc/intel/avs/pcm.c22
-rw-r--r--sound/soc/soc-component.c4
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c10
-rw-r--r--sound/soc/stm/stm32_sai_sub.c2
-rw-r--r--sound/usb/midi2.c7
-rw-r--r--tools/build/Makefile.build10
-rw-r--r--tools/build/feature/Makefile10
-rw-r--r--tools/build/feature/test-clang.cpp28
-rw-r--r--tools/build/feature/test-cxx.cpp16
-rw-r--r--tools/build/feature/test-llvm-version.cpp12
-rw-r--r--tools/build/feature/test-llvm.cpp14
-rw-r--r--tools/lib/perf/include/perf/event.h14
-rw-r--r--tools/perf/Documentation/perf-bench.txt3
-rw-r--r--tools/perf/Documentation/perf-config.txt33
-rw-r--r--tools/perf/Documentation/perf-dlfilter.txt22
-rw-r--r--tools/perf/Documentation/perf-ftrace.txt16
-rw-r--r--tools/perf/Documentation/perf-record.txt95
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt2
-rw-r--r--tools/perf/Makefile.config59
-rw-r--r--tools/perf/Makefile.perf36
-rw-r--r--tools/perf/arch/arm/include/perf_regs.h3
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c12
-rw-r--r--tools/perf/arch/arm/util/perf_regs.c11
-rw-r--r--tools/perf/arch/arm/util/unwind-libdw.c1
-rw-r--r--tools/perf/arch/arm64/include/arch-tests.h3
-rw-r--r--tools/perf/arch/arm64/include/perf_regs.h3
-rw-r--r--tools/perf/arch/arm64/tests/Build1
-rw-r--r--tools/perf/arch/arm64/tests/arch-tests.c4
-rw-r--r--tools/perf/arch/arm64/tests/cpuid-match.c37
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c2
-rw-r--r--tools/perf/arch/arm64/util/header.c67
-rw-r--r--tools/perf/arch/arm64/util/machine.c1
-rw-r--r--tools/perf/arch/arm64/util/mem-events.c2
-rw-r--r--tools/perf/arch/arm64/util/perf_regs.c6
-rw-r--r--tools/perf/arch/arm64/util/pmu.c18
-rw-r--r--tools/perf/arch/arm64/util/unwind-libdw.c1
-rw-r--r--tools/perf/arch/csky/include/perf_regs.h3
-rw-r--r--tools/perf/arch/csky/util/perf_regs.c11
-rw-r--r--tools/perf/arch/csky/util/unwind-libdw.c1
-rw-r--r--tools/perf/arch/loongarch/include/perf_regs.h2
-rw-r--r--tools/perf/arch/loongarch/util/perf_regs.c11
-rw-r--r--tools/perf/arch/loongarch/util/unwind-libdw.c1
-rw-r--r--tools/perf/arch/mips/include/perf_regs.h2
-rw-r--r--tools/perf/arch/mips/util/perf_regs.c11
-rw-r--r--tools/perf/arch/powerpc/include/perf_regs.h3
-rw-r--r--tools/perf/arch/powerpc/util/mem-events.c6
-rw-r--r--tools/perf/arch/powerpc/util/perf_regs.c6
-rw-r--r--tools/perf/arch/powerpc/util/unwind-libdw.c1
-rw-r--r--tools/perf/arch/riscv/include/perf_regs.h3
-rw-r--r--tools/perf/arch/riscv/util/perf_regs.c11
-rw-r--r--tools/perf/arch/riscv/util/unwind-libdw.c1
-rw-r--r--tools/perf/arch/s390/include/perf_regs.h3
-rw-r--r--tools/perf/arch/s390/util/perf_regs.c11
-rw-r--r--tools/perf/arch/s390/util/unwind-libdw.c1
-rwxr-xr-xtools/perf/arch/x86/entry/syscalls/syscalltbl.sh2
-rw-r--r--tools/perf/arch/x86/include/perf_regs.h2
-rw-r--r--tools/perf/arch/x86/util/evlist.c7
-rw-r--r--tools/perf/arch/x86/util/evsel.c7
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c39
-rw-r--r--tools/perf/arch/x86/util/mem-events.c8
-rw-r--r--tools/perf/arch/x86/util/perf_regs.c6
-rw-r--r--tools/perf/arch/x86/util/pmu.c10
-rw-r--r--tools/perf/arch/x86/util/unwind-libdw.c1
-rw-r--r--tools/perf/bench/Build1
-rw-r--r--tools/perf/bench/bench.h3
-rw-r--r--tools/perf/bench/breakpoint.c24
-rw-r--r--tools/perf/bench/pmu-scan.c8
-rw-r--r--tools/perf/bench/uprobe.c198
-rw-r--r--tools/perf/builtin-bench.c8
-rw-r--r--tools/perf/builtin-diff.c4
-rw-r--r--tools/perf/builtin-list.c23
-rw-r--r--tools/perf/builtin-lock.c3
-rw-r--r--tools/perf/builtin-record.c45
-rw-r--r--tools/perf/builtin-script.c22
-rw-r--r--tools/perf/builtin-top.c1
-rw-r--r--tools/perf/builtin-trace.c338
-rwxr-xr-xtools/perf/check-headers.sh6
-rw-r--r--tools/perf/dlfilters/dlfilter-test-api-v0.c26
-rw-r--r--tools/perf/dlfilters/dlfilter-test-api-v2.c377
-rw-r--r--tools/perf/examples/bpf/5sec.c53
-rw-r--r--tools/perf/examples/bpf/empty.c12
-rw-r--r--tools/perf/examples/bpf/hello.c27
-rw-r--r--tools/perf/examples/bpf/sys_enter_openat.c33
-rw-r--r--tools/perf/include/perf/perf_dlfilter.h11
-rw-r--r--tools/perf/perf.c2
-rw-r--r--tools/perf/pmu-events/Build6
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json3
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json120
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json362
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereone/pipeline.json12
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/bus.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json45
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/fp_operation.json22
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/general.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/instruction.json143
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1d_cache.json54
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1i_cache.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l2_cache.json50
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l3_cache.json22
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/ll_cache.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json39
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json365
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/pipeline.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/retired.json30
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spe.json12
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spec_operation.json110
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/stall.json30
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/sve.json50
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/tlb.json66
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/trace.json27
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/ali_drw.json373
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/metrics.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/sbsa.json24
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/cache.json47
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/floating_point.json66
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/frontend.json197
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/marked.json224
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/memory.json93
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/metrics.json89
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/others.json210
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pipeline.json292
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pmc.json198
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/translation.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv4
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/cache.json165
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/frontend.json56
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/memory.json80
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/other.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json159
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/other.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json10
-rw-r--r--tools/perf/pmu-events/empty-pmu-events.c49
-rwxr-xr-xtools/perf/pmu-events/jevents.py330
-rw-r--r--tools/perf/pmu-events/metric.py17
-rw-r--r--tools/perf/pmu-events/pmu-events.h15
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Build3
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py7
-rw-r--r--tools/perf/scripts/python/bin/gecko-record2
-rwxr-xr-xtools/perf/scripts/python/bin/gecko-report7
-rw-r--r--tools/perf/scripts/python/gecko.py395
-rw-r--r--tools/perf/tests/.gitignore5
-rw-r--r--tools/perf/tests/Build31
-rw-r--r--tools/perf/tests/bpf-script-example.c60
-rw-r--r--tools/perf/tests/bpf-script-test-kbuild.c21
-rw-r--r--tools/perf/tests/bpf-script-test-prologue.c49
-rw-r--r--tools/perf/tests/bpf-script-test-relocation.c51
-rw-r--r--tools/perf/tests/bpf.c389
-rw-r--r--tools/perf/tests/builtin-test.c14
-rw-r--r--tools/perf/tests/clang.c32
-rw-r--r--tools/perf/tests/config-fragments/README7
-rw-r--r--tools/perf/tests/config-fragments/arm641
-rw-r--r--tools/perf/tests/config-fragments/config11
-rw-r--r--tools/perf/tests/dlfilter-test.c38
-rw-r--r--tools/perf/tests/expr.c5
-rw-r--r--tools/perf/tests/llvm.c219
-rw-r--r--tools/perf/tests/llvm.h31
-rw-r--r--tools/perf/tests/make1
-rw-r--r--tools/perf/tests/parse-events.c4
-rw-r--r--tools/perf/tests/pmu-events.c208
-rw-r--r--tools/perf/tests/pmu.c94
-rwxr-xr-xtools/perf/tests/shell/coresight/asm_pure_loop.sh2
-rwxr-xr-xtools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh2
-rwxr-xr-xtools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh2
-rwxr-xr-xtools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh2
-rwxr-xr-xtools/perf/tests/shell/coresight/unroll_loop_thread_10.sh2
-rw-r--r--tools/perf/tests/shell/lib/probe.sh1
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh5
-rw-r--r--tools/perf/tests/shell/lib/stat_output.sh1
-rw-r--r--tools/perf/tests/shell/lib/waiting.sh1
-rwxr-xr-xtools/perf/tests/shell/lock_contention.sh12
-rwxr-xr-xtools/perf/tests/shell/probe_vfs_getname.sh4
-rwxr-xr-xtools/perf/tests/shell/record+zstd_comp_decomp.sh14
-rwxr-xr-xtools/perf/tests/shell/record_bpf_filter.sh134
-rwxr-xr-xtools/perf/tests/shell/record_offcpu.sh6
-rwxr-xr-xtools/perf/tests/shell/stat+csv_output.sh2
-rwxr-xr-xtools/perf/tests/shell/stat+csv_summary.sh4
-rwxr-xr-xtools/perf/tests/shell/stat+shadow_stat.sh4
-rwxr-xr-xtools/perf/tests/shell/stat+std_output.sh3
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh8
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters_cgrp.sh28
-rwxr-xr-xtools/perf/tests/shell/test_arm_spe_fork.sh2
-rwxr-xr-xtools/perf/tests/shell/test_perf_data_converter_json.sh2
-rwxr-xr-xtools/perf/tests/shell/test_task_analyzer.sh2
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh8
-rw-r--r--tools/perf/tests/stat.c2
-rw-r--r--tools/perf/tests/tests.h2
-rwxr-xr-xtools/perf/trace/beauty/arch_errno_names.sh15
-rw-r--r--tools/perf/trace/beauty/beauty.h15
-rwxr-xr-xtools/perf/trace/beauty/mmap_flags.sh7
-rwxr-xr-xtools/perf/trace/beauty/mmap_prot.sh5
-rwxr-xr-xtools/perf/trace/beauty/x86_arch_prctl.sh6
-rw-r--r--tools/perf/ui/Build2
-rw-r--r--tools/perf/ui/browser.c6
-rw-r--r--tools/perf/ui/browsers/Build5
-rw-r--r--tools/perf/ui/browsers/hists.c60
-rw-r--r--tools/perf/ui/libslang.h20
-rw-r--r--tools/perf/ui/tui/helpline.c2
-rw-r--r--tools/perf/ui/tui/setup.c2
-rw-r--r--tools/perf/ui/tui/util.c12
-rw-r--r--tools/perf/util/Build72
-rw-r--r--tools/perf/util/amd-sample-raw.c1
-rw-r--r--tools/perf/util/annotate.c10
-rw-r--r--tools/perf/util/bpf-filter.c14
-rw-r--r--tools/perf/util/bpf-filter.y2
-rw-r--r--tools/perf/util/bpf-loader.c2110
-rw-r--r--tools/perf/util/bpf-loader.h216
-rw-r--r--tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c (renamed from tools/perf/examples/bpf/augmented_raw_syscalls.c)53
-rw-r--r--tools/perf/util/bpf_skel/bench_uprobe.bpf.c23
-rw-r--r--tools/perf/util/build-id.c6
-rw-r--r--tools/perf/util/c++/Build2
-rw-r--r--tools/perf/util/c++/clang-c.h43
-rw-r--r--tools/perf/util/c++/clang-test.cpp67
-rw-r--r--tools/perf/util/c++/clang.cpp225
-rw-r--r--tools/perf/util/c++/clang.h27
-rw-r--r--tools/perf/util/config.c4
-rw-r--r--tools/perf/util/cs-etm.c14
-rw-r--r--tools/perf/util/dlfilter.c32
-rw-r--r--tools/perf/util/env.c8
-rw-r--r--tools/perf/util/event.c30
-rw-r--r--tools/perf/util/event.h2
-rw-r--r--tools/perf/util/evsel.c10
-rw-r--r--tools/perf/util/expr.c29
-rw-r--r--tools/perf/util/expr.h1
-rw-r--r--tools/perf/util/expr.l1
-rw-r--r--tools/perf/util/expr.y12
-rw-r--r--tools/perf/util/header.c52
-rw-r--r--tools/perf/util/libunwind/arm64.c2
-rw-r--r--tools/perf/util/libunwind/x86_32.c2
-rw-r--r--tools/perf/util/llvm-utils.c612
-rw-r--r--tools/perf/util/llvm-utils.h69
-rw-r--r--tools/perf/util/lzma.c12
-rw-r--r--tools/perf/util/machine.c4
-rw-r--r--tools/perf/util/mem-events.c16
-rw-r--r--tools/perf/util/mem-events.h2
-rw-r--r--tools/perf/util/metricgroup.c10
-rw-r--r--tools/perf/util/parse-events.c723
-rw-r--r--tools/perf/util/parse-events.h104
-rw-r--r--tools/perf/util/parse-events.l44
-rw-r--r--tools/perf/util/parse-events.y448
-rw-r--r--tools/perf/util/perf-regs-arch/Build9
-rw-r--r--tools/perf/util/perf-regs-arch/perf_regs_aarch64.c96
-rw-r--r--tools/perf/util/perf-regs-arch/perf_regs_arm.c60
-rw-r--r--tools/perf/util/perf-regs-arch/perf_regs_csky.c100
-rw-r--r--tools/perf/util/perf-regs-arch/perf_regs_loongarch.c91
-rw-r--r--tools/perf/util/perf-regs-arch/perf_regs_mips.c87
-rw-r--r--tools/perf/util/perf-regs-arch/perf_regs_powerpc.c145
-rw-r--r--tools/perf/util/perf-regs-arch/perf_regs_riscv.c92
-rw-r--r--tools/perf/util/perf-regs-arch/perf_regs_s390.c96
-rw-r--r--tools/perf/util/perf-regs-arch/perf_regs_x86.c98
-rw-r--r--tools/perf/util/perf_regs.c772
-rw-r--r--tools/perf/util/perf_regs.h56
-rw-r--r--tools/perf/util/pmu.c934
-rw-r--r--tools/perf/util/pmu.h122
-rw-r--r--tools/perf/util/pmu.y35
-rw-r--r--tools/perf/util/pmus.c326
-rw-r--r--tools/perf/util/pmus.h2
-rw-r--r--tools/perf/util/print-events.h1
-rw-r--r--tools/perf/util/probe-event.c30
-rw-r--r--tools/perf/util/probe-event.h1
-rw-r--r--tools/perf/util/python-ext-sources9
-rw-r--r--tools/perf/util/python.c5
-rw-r--r--tools/perf/util/s390-sample-raw.c51
-rw-r--r--tools/perf/util/scripting-engines/Build3
-rw-r--r--tools/perf/util/session.c4
-rw-r--r--tools/perf/util/setup.py3
-rw-r--r--tools/perf/util/stat-display.c4
-rw-r--r--tools/perf/util/stat.c2
-rw-r--r--tools/perf/util/svghelper.c2
-rw-r--r--tools/perf/util/symbol-elf.c2
-rw-r--r--tools/perf/util/symbol.c15
-rw-r--r--tools/perf/util/synthetic-events.c2
-rw-r--r--tools/perf/util/thread.c13
-rw-r--r--tools/perf/util/thread.h2
-rw-r--r--tools/perf/util/unwind-libdw.c8
-rw-r--r--tools/perf/util/unwind-libunwind-local.c6
-rw-r--r--tools/perf/util/unwind.h8
-rw-r--r--tools/scripts/utilities.mak20
-rw-r--r--tools/testing/radix-tree/multiorder.c68
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance-event.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc2
-rw-r--r--tools/testing/selftests/kselftest/runner.sh3
-rwxr-xr-xtools/testing/selftests/kselftest_deps.sh77
-rw-r--r--tools/testing/selftests/landlock/fs_test.c8
-rw-r--r--tools/testing/selftests/lib.mk4
-rw-r--r--tools/testing/selftests/net/bind_wildcard.c68
948 files changed, 23357 insertions, 11119 deletions
diff --git a/Documentation/admin-guide/perf/alibaba_pmu.rst b/Documentation/admin-guide/perf/alibaba_pmu.rst
index 11de998bb480..7d840023903f 100644
--- a/Documentation/admin-guide/perf/alibaba_pmu.rst
+++ b/Documentation/admin-guide/perf/alibaba_pmu.rst
@@ -88,6 +88,11 @@ data bandwidth::
-e ali_drw_27080/hif_rmw/ \
-e ali_drw_27080/cycle/ -- sleep 10
+Example usage of counting all memory read/write bandwidth by metric::
+
+ perf stat -M ddr_read_bandwidth.all -- sleep 10
+ perf stat -M ddr_write_bandwidth.all -- sleep 10
+
The average DRAM bandwidth can be calculated as follows:
- Read Bandwidth = perf_hif_rd * DDRC_WIDTH * DDRC_Freq / DDRC_Cycle
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 8019103aac10..cf33de56da27 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -450,6 +450,35 @@ this allows system administrators to override the
``IA64_THREAD_UAC_NOPRINT`` ``prctl`` and avoid logs being flooded.
+io_uring_disabled
+=================
+
+Prevents all processes from creating new io_uring instances. Enabling this
+shrinks the kernel's attack surface.
+
+= ======================================================================
+0 All processes can create io_uring instances as normal. This is the
+ default setting.
+1 io_uring creation is disabled (io_uring_setup() will fail with
+ -EPERM) for unprivileged processes not in the io_uring_group group.
+ Existing io_uring instances can still be used. See the
+ documentation for io_uring_group for more information.
+2 io_uring creation is disabled for all processes. io_uring_setup()
+ always fails with -EPERM. Existing io_uring instances can still be
+ used.
+= ======================================================================
+
+
+io_uring_group
+==============
+
+When io_uring_disabled is set to 1, a process must either be
+privileged (CAP_SYS_ADMIN) or be in the io_uring_group group in order
+to create an io_uring instance. If io_uring_group is set to -1 (the
+default), only processes with the CAP_SYS_ADMIN capability may create
+io_uring instances.
+
+
kexec_load_disabled
===================
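A minimal usage sketch of the two knobs documented in the hunk above, assuming a dedicated
group for io_uring users has already been created; the gid 1010 is purely illustrative::

    # allow io_uring only for CAP_SYS_ADMIN processes and members of gid 1010
    sysctl -w kernel.io_uring_disabled=1
    sysctl -w kernel.io_uring_group=1010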
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index f4acf9c2e90f..382818a7197a 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -41,8 +41,8 @@ Support
Architectures
~~~~~~~~~~~~~
-Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, and
-xtensa, and the tag-based KASAN modes are supported only on arm64.
+Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, xtensa,
+and loongarch, and the tag-based KASAN modes are supported only on arm64.
Compilers
~~~~~~~~~
diff --git a/Documentation/devicetree/bindings/cache/andestech,ax45mp-cache.yaml b/Documentation/devicetree/bindings/cache/andestech,ax45mp-cache.yaml
new file mode 100644
index 000000000000..9ab5f0c435d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/cache/andestech,ax45mp-cache.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2023 Renesas Electronics Corp.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/cache/andestech,ax45mp-cache.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Andestech AX45MP L2 Cache Controller
+
+maintainers:
+ - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+
+description:
+ A level-2 cache (L2C) is used to improve the system performance by providing
+ a large amount of cache line entries and reasonable access delays. The L2C
+ is shared between cores, and a non-inclusive non-exclusive policy is used.
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - andestech,ax45mp-cache
+
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - const: andestech,ax45mp-cache
+ - const: cache
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ cache-line-size:
+ const: 64
+
+ cache-level:
+ const: 2
+
+ cache-sets:
+ const: 1024
+
+ cache-size:
+ enum: [131072, 262144, 524288, 1048576, 2097152]
+
+ cache-unified: true
+
+ next-level-cache: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - cache-line-size
+ - cache-level
+ - cache-sets
+ - cache-size
+ - cache-unified
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ cache-controller@2010000 {
+ compatible = "andestech,ax45mp-cache", "cache";
+ reg = <0x13400000 0x100000>;
+ interrupts = <508 IRQ_TYPE_LEVEL_HIGH>;
+ cache-line-size = <64>;
+ cache-level = <2>;
+ cache-sets = <1024>;
+ cache-size = <262144>;
+ cache-unified;
+ };
diff --git a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
index bdde68a1059c..a680d7aff237 100644
--- a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
@@ -14,7 +14,13 @@ properties:
pattern: "^easrc@.*"
compatible:
- const: fsl,imx8mn-easrc
+ oneOf:
+ - enum:
+ - fsl,imx8mn-easrc
+ - items:
+ - enum:
+ - fsl,imx8mp-easrc
+ - const: fsl,imx8mn-easrc
reg:
maxItems: 1
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index bf0124fae643..c4581c2edb28 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -13,7 +13,7 @@
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
- | loongarch: | TODO |
+ | loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |
| mips: | TODO |
diff --git a/Documentation/features/debug/kcov/arch-support.txt b/Documentation/features/debug/kcov/arch-support.txt
index ffcc9f2b1d74..de84cefbcdd3 100644
--- a/Documentation/features/debug/kcov/arch-support.txt
+++ b/Documentation/features/debug/kcov/arch-support.txt
@@ -13,7 +13,7 @@
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
- | loongarch: | TODO |
+ | loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |
| mips: | ok |
diff --git a/Documentation/features/debug/kgdb/arch-support.txt b/Documentation/features/debug/kgdb/arch-support.txt
index 958498f9f2a4..5e91ec78c80b 100644
--- a/Documentation/features/debug/kgdb/arch-support.txt
+++ b/Documentation/features/debug/kgdb/arch-support.txt
@@ -13,7 +13,7 @@
| csky: | TODO |
| hexagon: | ok |
| ia64: | TODO |
- | loongarch: | TODO |
+ | loongarch: | ok |
| m68k: | TODO |
| microblaze: | ok |
| mips: | ok |
diff --git a/Documentation/filesystems/btrfs.rst b/Documentation/filesystems/btrfs.rst
index 992eddb0e11b..a81db8f54d68 100644
--- a/Documentation/filesystems/btrfs.rst
+++ b/Documentation/filesystems/btrfs.rst
@@ -37,7 +37,6 @@ For more information please refer to the documentation site or wiki
https://btrfs.readthedocs.io
- https://btrfs.wiki.kernel.org
that maintains information about administration tasks, frequently asked
questions, use cases, mount options, comprehensible changelogs, features,
diff --git a/Documentation/gpu/amdgpu/driver-misc.rst b/Documentation/gpu/amdgpu/driver-misc.rst
index be131e963d87..4321c38fef21 100644
--- a/Documentation/gpu/amdgpu/driver-misc.rst
+++ b/Documentation/gpu/amdgpu/driver-misc.rst
@@ -11,19 +11,19 @@ via sysfs
product_name
------------
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
:doc: product_name
product_number
--------------
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
- :doc: product_name
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+ :doc: product_number
serial_number
-------------
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
:doc: serial_number
unique_id
diff --git a/Documentation/gpu/automated_testing.rst b/Documentation/gpu/automated_testing.rst
new file mode 100644
index 000000000000..469b6fb65c30
--- /dev/null
+++ b/Documentation/gpu/automated_testing.rst
@@ -0,0 +1,144 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+=========================================
+Automated testing of the DRM subsystem
+=========================================
+
+Introduction
+============
+
+Making sure that changes to the core or drivers don't introduce regressions can
+be very time-consuming when lots of different hardware configurations need to
+be tested. Moreover, it isn't practical for each person interested in this
+testing to have to acquire and maintain what can be a considerable amount of
+hardware.
+
+Also, it is desirable for developers to check for regressions in their code by
+themselves, instead of relying on the maintainers to find them and report
+back.
+
+There are facilities in gitlab.freedesktop.org for automatically testing Mesa
+that can also be used to test the DRM subsystem. This document explains how
+people interested in such testing can use this shared infrastructure to save
+considerable time and effort.
+
+
+Relevant files
+==============
+
+drivers/gpu/drm/ci/gitlab-ci.yml
+--------------------------------
+
+This is the root configuration file for GitLab CI. Among other less interesting
+bits, it pins the exact version of the CI scripts to be used. There are
+some variables that can be modified to change the behavior of the pipeline:
+
+DRM_CI_PROJECT_PATH
+ Repository that contains the Mesa software infrastructure for CI
+
+DRM_CI_COMMIT_SHA
+ A particular revision to use from that repository
+
+UPSTREAM_REPO
+ URL to git repository containing the target branch
+
+TARGET_BRANCH
+ Branch into which this branch is to be merged
+
+IGT_VERSION
+ Revision of igt-gpu-tools being used, from
+ https://gitlab.freedesktop.org/drm/igt-gpu-tools
+
+drivers/gpu/drm/ci/testlist.txt
+-------------------------------
+
+IGT tests to be run on all drivers (unless mentioned in a driver's \*-skips.txt
+file, see below).
+
+drivers/gpu/drm/ci/${DRIVER_NAME}-${HW_REVISION}-fails.txt
+----------------------------------------------------------
+
+Lists the known failures for a given driver on a specific hardware revision.
+
+drivers/gpu/drm/ci/${DRIVER_NAME}-${HW_REVISION}-flakes.txt
+-----------------------------------------------------------
+
+Lists the tests that are known to behave unreliably for a given driver on a
+specific hardware revision. These tests are still run, but their results will
+not cause a job to fail either way.
+
+drivers/gpu/drm/ci/${DRIVER_NAME}-${HW_REVISION}-skips.txt
+-----------------------------------------------------------
+
+Lists the tests that won't be run for a given driver on a specific hardware
+revision. These are usually tests that interfere with the running of the test
+list due to hanging the machine, causing OOM, taking too long, etc.
+
+
+How to enable automated testing on your tree
+============================================
+
+1. Create a Linux tree in https://gitlab.freedesktop.org/ if you don't have one
+yet
+
+2. In your kernel repo's configuration (e.g.
+https://gitlab.freedesktop.org/janedoe/linux/-/settings/ci_cd), change the
+CI/CD configuration file from .gitlab-ci.yml to
+drivers/gpu/drm/ci/gitlab-ci.yml.
+
+3. Next time you push to this repository, you will see a CI pipeline being
+created (e.g. https://gitlab.freedesktop.org/janedoe/linux/-/pipelines)
+
+4. The various jobs will be run and when the pipeline is finished, all jobs
+should be green unless a regression has been found.
+
+
+How to update test expectations
+===============================
+
+If your changes to the code fix any tests, you will have to remove one or more
+lines from one or more of the
+drivers/gpu/drm/ci/${DRIVER_NAME}-${HW_REVISION}-fails.txt files, for each of
+the test platforms affected by the change.
+
+
+How to expand coverage
+======================
+
+If your code changes make it possible to run more tests (by solving reliability
+issues, for example), you can remove tests from the flakes and/or skips lists,
+and then update the expected results (the fails lists) if there are any known
+failures.
+
+If there is a need for updating the version of IGT being used (maybe you have
+added more tests to it), update the IGT_VERSION variable at the top of the
+gitlab-ci.yml file.
+
+
+How to test your changes to the scripts
+=======================================
+
+For testing changes to the scripts in the drm-ci repo, change the
+DRM_CI_PROJECT_PATH and DRM_CI_COMMIT_SHA variables in
+drivers/gpu/drm/ci/gitlab-ci.yml to match your fork of the project (e.g.
+janedoe/drm-ci). This fork needs to be in https://gitlab.freedesktop.org/.
+
+
+How to incorporate external fixes in your testing
+=================================================
+
+Often, regressions in other trees will prevent testing changes local to the
+tree under test. Fixes for such regressions will be automatically merged in
+during the build jobs from a branch in the target tree named
+${TARGET_BRANCH}-external-fixes.
+
+If the pipeline is not in a merge request and a branch with the same name
+exists in the local tree, commits from that branch will be merged in as well.
+
+
+How to deal with automated testing labs that may be down
+========================================================
+
+If a hardware farm is down, causing pipelines to fail that would otherwise
+pass, one can disable all jobs that would be submitted to that farm by
+editing the file at
+https://gitlab.freedesktop.org/gfx-ci/lab-status/-/blob/main/lab-status.yml.
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index eee5996acf2c..e45ff0915246 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -17,6 +17,7 @@ GPU Driver Developer's Guide
backlight
vga-switcheroo
vgaarbiter
+ automated_testing
todo
rfc/index
diff --git a/Documentation/riscv/hwprobe.rst b/Documentation/riscv/hwprobe.rst
index 20eff9650da9..a52996b22f75 100644
--- a/Documentation/riscv/hwprobe.rst
+++ b/Documentation/riscv/hwprobe.rst
@@ -87,13 +87,12 @@ The following keys are defined:
emulated via software, either in or below the kernel. These accesses are
always extremely slow.
- * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are supported
- in hardware, but are slower than the corresponding aligned accesses
- sequences.
+ * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower
+ than equivalent byte accesses. Misaligned accesses may be supported
+ directly in hardware, or trapped and emulated by software.
- * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are supported
- in hardware and are faster than the corresponding aligned accesses
- sequences.
+ * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster
+ than equivalent byte accesses.
* :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are
not supported at all and will generate a misaligned address fault.
diff --git a/Documentation/translations/zh_CN/dev-tools/kasan.rst b/Documentation/translations/zh_CN/dev-tools/kasan.rst
index 05ef904dbcfb..8fdb20c9665b 100644
--- a/Documentation/translations/zh_CN/dev-tools/kasan.rst
+++ b/Documentation/translations/zh_CN/dev-tools/kasan.rst
@@ -42,7 +42,7 @@ KASAN有三种模式:
体系架构
~~~~~~~~
-在x86_64、arm、arm64、powerpc、riscv、s390和xtensa上支持通用KASAN,
+在x86_64、arm、arm64、powerpc、riscv、s390、xtensa和loongarch上支持通用KASAN,
而基于标签的KASAN模式只在arm64上支持。
编译器
diff --git a/MAINTAINERS b/MAINTAINERS
index be36ec2687b8..3b32228356de 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1855,7 +1855,7 @@ F: Documentation/devicetree/bindings/phy/amlogic*
F: arch/arm/boot/dts/amlogic/
F: arch/arm/mach-meson/
F: arch/arm64/boot/dts/amlogic/
-F: drivers/genpd/amlogic/
+F: drivers/pmdomain/amlogic/
F: drivers/mmc/host/meson*
F: drivers/phy/amlogic/
F: drivers/pinctrl/meson/
@@ -1918,7 +1918,7 @@ F: drivers/bluetooth/hci_bcm4377.c
F: drivers/clk/clk-apple-nco.c
F: drivers/cpufreq/apple-soc-cpufreq.c
F: drivers/dma/apple-admac.c
-F: drivers/genpd/apple/
+F: drivers/pmdomain/apple/
F: drivers/i2c/busses/i2c-pasemi-core.c
F: drivers/i2c/busses/i2c-pasemi-platform.c
F: drivers/iommu/apple-dart.c
@@ -2435,7 +2435,7 @@ F: arch/arm/mach-ux500/
F: drivers/clk/clk-nomadik.c
F: drivers/clocksource/clksrc-dbx500-prcmu.c
F: drivers/dma/ste_dma40*
-F: drivers/genpd/st/ste-ux500-pm-domain.c
+F: drivers/pmdomain/st/ste-ux500-pm-domain.c
F: drivers/hwspinlock/u8500_hsem.c
F: drivers/i2c/busses/i2c-nomadik.c
F: drivers/iio/adc/ab8500-gpadc.c
@@ -2598,7 +2598,7 @@ F: arch/arm/include/debug/renesas-scif.S
F: arch/arm/mach-shmobile/
F: arch/arm64/boot/dts/renesas/
F: arch/riscv/boot/dts/renesas/
-F: drivers/genpd/renesas/
+F: drivers/pmdomain/renesas/
F: drivers/soc/renesas/
F: include/linux/soc/renesas/
K: \brenesas,
@@ -4026,7 +4026,7 @@ F: arch/mips/kernel/*bmips*
F: drivers/irqchip/irq-bcm63*
F: drivers/irqchip/irq-bcm7*
F: drivers/irqchip/irq-brcmstb*
-F: drivers/genpd/bcm/bcm63xx-power.c
+F: drivers/pmdomain/bcm/bcm63xx-power.c
F: include/linux/bcm963xx_nvram.h
F: include/linux/bcm963xx_tag.h
@@ -4248,7 +4248,7 @@ R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-pm@vger.kernel.org
S: Maintained
T: git https://github.com/broadcom/stblinux.git
-F: drivers/genpd/bcm/bcm-pmb.c
+F: drivers/pmdomain/bcm/bcm-pmb.c
F: include/dt-bindings/soc/bcm-pmb.h
BROADCOM SPECIFIC AMBA DRIVER (BCMA)
@@ -4377,7 +4377,6 @@ M: David Sterba <dsterba@suse.com>
L: linux-btrfs@vger.kernel.org
S: Maintained
W: https://btrfs.readthedocs.io
-W: https://btrfs.wiki.kernel.org/
Q: https://patchwork.kernel.org/project/linux-btrfs/list/
C: irc://irc.libera.chat/btrfs
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
@@ -7164,6 +7163,14 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/ttm/
F: include/drm/ttm/
+DRM AUTOMATED TESTING
+M: Helen Koike <helen.koike@collabora.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/gpu/automated_testing.rst
+F: drivers/gpu/drm/ci/
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -8720,7 +8727,7 @@ M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-pm@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git
-F: drivers/genpd/
+F: drivers/pmdomain/
GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
@@ -16762,6 +16769,8 @@ L: linux-kernel@vger.kernel.org
S: Supported
W: https://perf.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools.git perf-tools
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git perf-tools-next
F: arch/*/events/*
F: arch/*/events/*/*
F: arch/*/include/asm/perf_event.h
@@ -17669,7 +17678,7 @@ L: linux-pm@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml
-F: drivers/genpd/qcom/cpr.c
+F: drivers/pmdomain/qcom/cpr.c
QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
M: Ilia Lin <ilia.lin@kernel.org>
@@ -20405,6 +20414,13 @@ S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
F: drivers/staging/
+STANDALONE CACHE CONTROLLER DRIVERS
+M: Conor Dooley <conor@kernel.org>
+L: linux-riscv@lists.infradead.org
+S: Maintained
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F: drivers/cache
+
STARFIRE/DURALAN NETWORK DRIVER
M: Ion Badulescu <ionut@badula.org>
S: Odd Fixes
@@ -20496,7 +20512,7 @@ STARFIVE JH71XX PMU CONTROLLER DRIVER
M: Walker Chen <walker.chen@starfivetech.com>
S: Supported
F: Documentation/devicetree/bindings/power/starfive*
-F: drivers/genpd/starfive/jh71xx-pmu.c
+F: drivers/pmdomain/starfive/jh71xx-pmu.c
F: include/dt-bindings/power/starfive,jh7110-pmu.h
STARFIVE SOC DRIVERS
@@ -21243,7 +21259,7 @@ F: sound/soc/ti/
TEXAS INSTRUMENTS AUDIO (ASoC/HDA) DRIVERS
M: Shenghao Ding <shenghao-ding@ti.com>
M: Kevin Lu <kevin-lu@ti.com>
-M: Baojun Xu <x1077012@ti.com>
+M: Baojun Xu <baojun.xu@ti.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/sound/tas2552.txt
@@ -21321,7 +21337,7 @@ F: drivers/irqchip/irq-ti-sci-inta.c
F: drivers/irqchip/irq-ti-sci-intr.c
F: drivers/reset/reset-ti-sci.c
F: drivers/soc/ti/ti_sci_inta_msi.c
-F: drivers/genpd/ti/ti_sci_pm_domains.c
+F: drivers/pmdomain/ti/ti_sci_pm_domains.c
F: include/dt-bindings/soc/ti,sci_pm_domain.h
F: include/linux/soc/ti/ti_sci_inta_msi.h
F: include/linux/soc/ti/ti_sci_protocol.h
@@ -21563,7 +21579,7 @@ L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
-F: drivers/genpd/ti/omap_prm.c
+F: drivers/pmdomain/ti/omap_prm.c
F: drivers/soc/ti/*
TI LM49xxx FAMILY ASoC CODEC DRIVERS
diff --git a/Makefile b/Makefile
index 73f23fa0677a..ceb23eed4dce 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 5
+PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index f482b994c608..bcd5622aa096 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -156,4 +156,6 @@ static inline void efi_capsule_flush_cache_range(void *addr, int size)
efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f);
+void efi_icache_sync(unsigned long start, unsigned long end);
+
#endif /* _ASM_EFI_H */
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index aee12c75b738..3addc09f8746 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -262,9 +262,9 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
if (!len)
return;
- len = strscpy(buf, cmdline, ARRAY_SIZE(buf));
- if (len == -E2BIG)
- len = ARRAY_SIZE(buf) - 1;
+ len = min(len, ARRAY_SIZE(buf) - 1);
+ memcpy(buf, cmdline, len);
+ buf[len] = '\0';
if (strcmp(buf, "--") == 0)
return;
diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
index 78b87a64ca0a..2432683e48a6 100644
--- a/arch/arm64/lib/csum.c
+++ b/arch/arm64/lib/csum.c
@@ -24,7 +24,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
const u64 *ptr;
u64 data, sum64 = 0;
- if (unlikely(len == 0))
+ if (unlikely(len <= 0))
return 0;
offset = (unsigned long)buff & 7;
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index ecf282dee513..e14396a2ddcb 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -8,11 +8,13 @@ config LOONGARCH
select ACPI_PPTT if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_BINFMT_ELF_STATE
+ select ARCH_DISABLE_KASAN_INLINE
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_FORTIFY_SOURCE
+ select ARCH_HAS_KCOV
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL
@@ -91,6 +93,9 @@ config LOONGARCH
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_KASAN
+ select HAVE_ARCH_KFENCE
+ select HAVE_ARCH_KGDB if PERF_EVENTS
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
@@ -115,6 +120,7 @@ config LOONGARCH
select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
+ select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IOREMAP_PROT
@@ -254,6 +260,9 @@ config AS_HAS_LSX_EXTENSION
config AS_HAS_LASX_EXTENSION
def_bool $(as-instr,xvld \$xr0$(comma)\$a0$(comma)0)
+config AS_HAS_LBT_EXTENSION
+ def_bool $(as-instr,movscr2gr \$a0$(comma)\$scr0)
+
menu "Kernel type and options"
source "kernel/Kconfig.hz"
@@ -534,6 +543,18 @@ config CPU_HAS_LASX
If unsure, say Y.
+config CPU_HAS_LBT
+ bool "Support for the Loongson Binary Translation Extension"
+ depends on AS_HAS_LBT_EXTENSION
+ help
+ Loongson Binary Translation (LBT) introduces 4 scratch registers (SCR0
+ to SCR3), x86/ARM eflags (eflags) and the x87 FPU stack pointer (ftop).
+ Enabling this option allows the kernel to allocate and context-switch
+ these LBT-specific registers.
+
+ If you want to use applications that rely on this feature, such as the
+ Loongson Architecture Translator (LAT), say Y.
+
config CPU_HAS_PREFETCH
bool
default y
@@ -638,6 +659,11 @@ config ARCH_MMAP_RND_BITS_MAX
config ARCH_SUPPORTS_UPROBES
def_bool y
+config KASAN_SHADOW_OFFSET
+ hex
+ default 0x0
+ depends on KASAN
+
menu "Power management options"
config ARCH_SUSPEND_POSSIBLE
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index ef87bab46754..fb0fada43197 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -84,7 +84,10 @@ LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
endif
cflags-y += $(call cc-option, -mno-check-zero-division)
+
+ifndef CONFIG_KASAN
cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset
+endif
load-y = 0x9000000000200000
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index d64849b4cba1..a3b52aaa83b3 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -30,7 +30,6 @@ CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
-CONFIG_SYSFS_DEPRECATED=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
@@ -47,8 +46,12 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_NR_CPUS=64
CONFIG_NUMA=y
+CONFIG_CPU_HAS_FPU=y
+CONFIG_CPU_HAS_LSX=y
+CONFIG_CPU_HAS_LASX=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
+CONFIG_RANDOMIZE_BASE=y
CONFIG_SUSPEND=y
CONFIG_HIBERNATION=y
CONFIG_ACPI=y
@@ -63,6 +66,7 @@ CONFIG_EFI_ZBOOT=y
CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EFI_TEST=m
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
@@ -108,7 +112,12 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
CONFIG_INET_ESP=m
CONFIG_INET_UDP_DIAG=y
CONFIG_TCP_CONG_ADVANCED=y
@@ -137,7 +146,6 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_TUNNEL=m
-CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
@@ -208,7 +216,11 @@ CONFIG_IP_VS=m
CONFIG_IP_VS_IPV6=y
CONFIG_IP_VS_PROTO_TCP=y
CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
CONFIG_IP_VS_NFCT=y
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_DUP_IPV4=m
@@ -227,7 +239,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
@@ -363,6 +374,8 @@ CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_CFI_STAA=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_BLOCK=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
@@ -370,6 +383,7 @@ CONFIG_PARPORT_PC_FIFO=y
CONFIG_ZRAM=m
CONFIG_ZRAM_DEF_COMP_ZSTD=y
CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -516,6 +530,8 @@ CONFIG_STMMAC_ETH=y
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NGBE=y
+CONFIG_TXGBE=y
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_PPP=m
@@ -602,9 +618,15 @@ CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
CONFIG_I2C_GPIO=y
+CONFIG_I2C_LS2X=y
CONFIG_SPI=y
+CONFIG_SPI_LOONGSON_PCI=m
+CONFIG_SPI_LOONGSON_PLATFORM=m
+CONFIG_PINCTRL=y
+CONFIG_PINCTRL_LOONGSON2=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_LOONGSON=y
+CONFIG_GPIO_LOONGSON_64BIT=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_RESTART=y
CONFIG_POWER_RESET_SYSCON=y
@@ -614,6 +636,7 @@ CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM93=m
CONFIG_SENSORS_W83795=m
CONFIG_SENSORS_W83627HF=m
+CONFIG_LOONGSON2_THERMAL=m
CONFIG_RC_CORE=m
CONFIG_LIRC=y
CONFIG_RC_DECODERS=y
@@ -643,6 +666,7 @@ CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_AST=y
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_LOONGSON=y
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
@@ -712,6 +736,7 @@ CONFIG_UCSI_ACPI=m
CONFIG_INFINIBAND=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_LOONGSON=y
CONFIG_DMADEVICES=y
CONFIG_UIO=m
CONFIG_UIO_PDRV_GENIRQ=m
@@ -745,7 +770,9 @@ CONFIG_COMEDI_NI_LABPC_PCI=m
CONFIG_COMEDI_NI_PCIDIO=m
CONFIG_COMEDI_NI_PCIMIO=m
CONFIG_STAGING=y
-CONFIG_R8188EU=m
+CONFIG_COMMON_CLK_LOONGSON2=y
+CONFIG_LOONGSON2_GUTS=y
+CONFIG_LOONGSON2_PM=y
CONFIG_PM_DEVFREQ=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
@@ -759,10 +786,17 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
@@ -771,11 +805,14 @@ CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
CONFIG_OVERLAY_FS=y
CONFIG_OVERLAY_FS_INDEX=y
CONFIG_OVERLAY_FS_XINO_AUTO=y
CONFIG_OVERLAY_FS_METACOPY=y
CONFIG_FSCACHE=y
+CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -784,19 +821,42 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_CODEPAGE=936
CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+CONFIG_EXFAT_FS=m
+CONFIG_NTFS3_FS=m
+CONFIG_NTFS3_64BIT_CLUSTER=y
+CONFIG_NTFS3_LZX_XPRESS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=y
+CONFIG_ORANGEFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
+CONFIG_UBIFS_FS=m
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_PSTORE=m
+CONFIG_PSTORE_LZO_COMPRESS=m
+CONFIG_PSTORE_LZ4_COMPRESS=m
+CONFIG_PSTORE_LZ4HC_COMPRESS=m
+CONFIG_PSTORE_842_COMPRESS=y
+CONFIG_PSTORE_ZSTD_COMPRESS=y
+CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
+CONFIG_EROFS_FS_ZIP_LZMA=y
+CONFIG_EROFS_FS_PCPU_KTHREAD=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
@@ -807,6 +867,10 @@ CONFIG_NFSD=y
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_BLOCKLAYOUT=y
+CONFIG_CEPH_FS=m
+CONFIG_CEPH_FSCACHE=y
+CONFIG_CEPH_FS_POSIX_ACL=y
+CONFIG_CEPH_FS_SECURITY_LABEL=y
CONFIG_CIFS=m
# CONFIG_CIFS_DEBUG is not set
CONFIG_9P_FS=y
@@ -814,6 +878,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_936=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_UTF8=y
+CONFIG_DLM=m
CONFIG_KEY_DH_OPERATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
@@ -847,6 +912,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_CRC32_LOONGARCH=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_PRINTK_TIME=y
CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
index ed06d3997420..cf8e1a4e7c19 100644
--- a/arch/loongarch/include/asm/asm-prototypes.h
+++ b/arch/loongarch/include/asm/asm-prototypes.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/uaccess.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/ftrace.h>
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
index 79e1d53fea89..c9544f358c33 100644
--- a/arch/loongarch/include/asm/asmmacro.h
+++ b/arch/loongarch/include/asm/asmmacro.h
@@ -10,113 +10,6 @@
#include <asm/fpregdef.h>
#include <asm/loongarch.h>
- .macro parse_v var val
- \var = \val
- .endm
-
- .macro parse_r var r
- \var = -1
- .ifc \r, $r0
- \var = 0
- .endif
- .ifc \r, $r1
- \var = 1
- .endif
- .ifc \r, $r2
- \var = 2
- .endif
- .ifc \r, $r3
- \var = 3
- .endif
- .ifc \r, $r4
- \var = 4
- .endif
- .ifc \r, $r5
- \var = 5
- .endif
- .ifc \r, $r6
- \var = 6
- .endif
- .ifc \r, $r7
- \var = 7
- .endif
- .ifc \r, $r8
- \var = 8
- .endif
- .ifc \r, $r9
- \var = 9
- .endif
- .ifc \r, $r10
- \var = 10
- .endif
- .ifc \r, $r11
- \var = 11
- .endif
- .ifc \r, $r12
- \var = 12
- .endif
- .ifc \r, $r13
- \var = 13
- .endif
- .ifc \r, $r14
- \var = 14
- .endif
- .ifc \r, $r15
- \var = 15
- .endif
- .ifc \r, $r16
- \var = 16
- .endif
- .ifc \r, $r17
- \var = 17
- .endif
- .ifc \r, $r18
- \var = 18
- .endif
- .ifc \r, $r19
- \var = 19
- .endif
- .ifc \r, $r20
- \var = 20
- .endif
- .ifc \r, $r21
- \var = 21
- .endif
- .ifc \r, $r22
- \var = 22
- .endif
- .ifc \r, $r23
- \var = 23
- .endif
- .ifc \r, $r24
- \var = 24
- .endif
- .ifc \r, $r25
- \var = 25
- .endif
- .ifc \r, $r26
- \var = 26
- .endif
- .ifc \r, $r27
- \var = 27
- .endif
- .ifc \r, $r28
- \var = 28
- .endif
- .ifc \r, $r29
- \var = 29
- .endif
- .ifc \r, $r30
- \var = 30
- .endif
- .ifc \r, $r31
- \var = 31
- .endif
- .iflt \var
- .error "Unable to parse register name \r"
- .endif
- .endm
-
.macro cpu_save_nonscratch thread
stptr.d s0, \thread, THREAD_REG23
stptr.d s1, \thread, THREAD_REG24
@@ -148,12 +41,51 @@
.macro fpu_save_csr thread tmp
movfcsr2gr \tmp, fcsr0
- stptr.w \tmp, \thread, THREAD_FCSR
+ stptr.w \tmp, \thread, THREAD_FCSR
+#ifdef CONFIG_CPU_HAS_LBT
+ /* TM bit is always 0 if LBT not supported */
+ andi \tmp, \tmp, FPU_CSR_TM
+ beqz \tmp, 1f
+ /* Save FTOP */
+ x86mftop \tmp
+ stptr.w \tmp, \thread, THREAD_FTOP
+ /* Turn off TM so that the FPR layout in memory does not depend on TM */
+ x86clrtm
+1:
+#endif
.endm
- .macro fpu_restore_csr thread tmp
- ldptr.w \tmp, \thread, THREAD_FCSR
- movgr2fcsr fcsr0, \tmp
+ .macro fpu_restore_csr thread tmp0 tmp1
+ ldptr.w \tmp0, \thread, THREAD_FCSR
+ movgr2fcsr fcsr0, \tmp0
+#ifdef CONFIG_CPU_HAS_LBT
+ /* TM bit is always 0 if LBT not supported */
+ andi \tmp0, \tmp0, FPU_CSR_TM
+ beqz \tmp0, 2f
+ /* Restore FTOP */
+ ldptr.w \tmp0, \thread, THREAD_FTOP
+ andi \tmp0, \tmp0, 0x7
+ la.pcrel \tmp1, 1f
+ alsl.d \tmp1, \tmp0, \tmp1, 3
+ jr \tmp1
+1:
+ x86mttop 0
+ b 2f
+ x86mttop 1
+ b 2f
+ x86mttop 2
+ b 2f
+ x86mttop 3
+ b 2f
+ x86mttop 4
+ b 2f
+ x86mttop 5
+ b 2f
+ x86mttop 6
+ b 2f
+ x86mttop 7
+2:
+#endif
.endm
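
The FTOP restore above is a small computed jump: la.pcrel takes the address of label 1, and the alsl.d adds the saved top-of-stack value shifted left by 3, because each "x86mttop n; b 2f" pair occupies exactly 8 bytes. A C-level sketch of the same dispatch (illustrative only, not part of the patch):

	/* Illustrative C equivalent of the FTOP dispatch in fpu_restore_csr. */
	static void restore_ftop_sketch(unsigned int saved_ftop)
	{
		switch (saved_ftop & 0x7) {	/* andi \tmp0, \tmp0, 0x7 */
		case 0: /* x86mttop 0 */ break;
		case 1: /* x86mttop 1 */ break;
		case 2: /* x86mttop 2 */ break;
		case 3: /* x86mttop 3 */ break;
		case 4: /* x86mttop 4 */ break;
		case 5: /* x86mttop 5 */ break;
		case 6: /* x86mttop 6 */ break;
		case 7: /* x86mttop 7 */ break;
		}
	}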
.macro fpu_save_cc thread tmp0 tmp1
@@ -353,7 +285,7 @@
.macro lsx_restore_all thread tmp0 tmp1
lsx_restore_data \thread, \tmp0
fpu_restore_cc \thread, \tmp0, \tmp1
- fpu_restore_csr \thread, \tmp0
+ fpu_restore_csr \thread, \tmp0, \tmp1
.endm
.macro lsx_save_upper vd base tmp off
@@ -563,7 +495,7 @@
.macro lasx_restore_all thread tmp0 tmp1
lasx_restore_data \thread, \tmp0
fpu_restore_cc \thread, \tmp0, \tmp1
- fpu_restore_csr \thread, \tmp0
+ fpu_restore_csr \thread, \tmp0, \tmp1
.endm
.macro lasx_save_upper xd base tmp off
diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
new file mode 100644
index 000000000000..deeff8158f45
--- /dev/null
+++ b/arch/loongarch/include/asm/kasan.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/mmzone.h>
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#define __HAVE_ARCH_SHADOW_MAP
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+#define XRANGE_SHIFT (48)
+
+/* Number of valid address bits */
+#define XRANGE_SHADOW_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+/* Mask used to extract the valid address bits */
+#define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
+/* Size of the whole address space of one segment */
+#define XRANGE_SIZE (XRANGE_SHADOW_MASK + 1)
+
+/* 64-bit segment value. */
+#define XKPRANGE_UC_SEG (0x8000)
+#define XKPRANGE_CC_SEG (0x9000)
+#define XKVRANGE_VC_SEG (0xffff)
+
+/* Cached */
+#define XKPRANGE_CC_START CACHE_BASE
+#define XKPRANGE_CC_SIZE XRANGE_SIZE
+#define XKPRANGE_CC_KASAN_OFFSET (0)
+#define XKPRANGE_CC_SHADOW_SIZE (XKPRANGE_CC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_CC_SHADOW_END (XKPRANGE_CC_KASAN_OFFSET + XKPRANGE_CC_SHADOW_SIZE)
+
+/* UnCached */
+#define XKPRANGE_UC_START UNCACHE_BASE
+#define XKPRANGE_UC_SIZE XRANGE_SIZE
+#define XKPRANGE_UC_KASAN_OFFSET XKPRANGE_CC_SHADOW_END
+#define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
+
+/* VMALLOC (Cached or UnCached) */
+#define XKVRANGE_VC_START MODULES_VADDR
+#define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
+#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
+#define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
+
+/* KASAN shadow memory starts right after the vmalloc area. */
+#define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE)
+#define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
+#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
+
+#define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
+#define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
+#define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
+
+extern bool kasan_early_stage;
+extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
+
+#define kasan_arch_is_ready kasan_arch_is_ready
+static __always_inline bool kasan_arch_is_ready(void)
+{
+ return !kasan_early_stage;
+}
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ if (!kasan_arch_is_ready()) {
+ return (void *)(kasan_early_shadow_page);
+ } else {
+ unsigned long maddr = (unsigned long)addr;
+ unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+ unsigned long offset = 0;
+
+ maddr &= XRANGE_SHADOW_MASK;
+ switch (xrange) {
+ case XKPRANGE_CC_SEG:
+ offset = XKPRANGE_CC_SHADOW_OFFSET;
+ break;
+ case XKPRANGE_UC_SEG:
+ offset = XKPRANGE_UC_SHADOW_OFFSET;
+ break;
+ case XKVRANGE_VC_SEG:
+ offset = XKVRANGE_VC_SHADOW_OFFSET;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
+ }
+}
+
+static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ unsigned long addr = (unsigned long)shadow_addr;
+
+ if (unlikely(addr > KASAN_SHADOW_END) ||
+ unlikely(addr < KASAN_SHADOW_START)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+ return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+ else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+ else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
+ else {
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+
+#endif
+#endif
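
As a quick illustration of the mapping defined above (a sketch, not part of the patch): each shadow byte covers 1 << KASAN_SHADOW_SCALE_SHIFT = 8 bytes of kernel memory, so a round trip through kasan_mem_to_shadow() and kasan_shadow_to_mem() recovers the original address rounded down to an 8-byte boundary.

	/*
	 * Illustrative round trip through the mapping above; assumes KASAN is
	 * already initialised (kasan_arch_is_ready() returns true).
	 */
	static inline bool kasan_roundtrip_sketch(const void *p)
	{
		void *shadow = kasan_mem_to_shadow(p);

		/* Shifting right by KASAN_SHADOW_SCALE_SHIFT drops the low 3 bits,
		 * so the inverse mapping yields p aligned down to 8 bytes. */
		return kasan_shadow_to_mem(shadow) ==
		       (const void *)((unsigned long)p & ~((1UL << KASAN_SHADOW_SCALE_SHIFT) - 1));
	}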
diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h
new file mode 100644
index 000000000000..6c82aea1c993
--- /dev/null
+++ b/arch/loongarch/include/asm/kfence.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KFENCE support for LoongArch.
+ *
+ * Author: Enze Li <lienze@kylinos.cn>
+ * Copyright (C) 2022-2023 KylinSoft Corporation.
+ */
+
+#ifndef _ASM_LOONGARCH_KFENCE_H
+#define _ASM_LOONGARCH_KFENCE_H
+
+#include <linux/kfence.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+static inline bool arch_kfence_init_pool(void)
+{
+ int err;
+ char *kfence_pool = __kfence_pool;
+ struct vm_struct *area;
+
+ area = __get_vm_area_caller(KFENCE_POOL_SIZE, VM_IOREMAP,
+ KFENCE_AREA_START, KFENCE_AREA_END,
+ __builtin_return_address(0));
+ if (!area)
+ return false;
+
+ __kfence_pool = (char *)area->addr;
+ err = ioremap_page_range((unsigned long)__kfence_pool,
+ (unsigned long)__kfence_pool + KFENCE_POOL_SIZE,
+ virt_to_phys((void *)kfence_pool), PAGE_KERNEL);
+ if (err) {
+ free_vm_area(area);
+ __kfence_pool = kfence_pool;
+ return false;
+ }
+
+ return true;
+}
+
+/* Protect or unprotect the given page and flush the TLB. */
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *pte = virt_to_kpte(addr);
+
+ if (WARN_ON(!pte) || pte_none(*pte))
+ return false;
+
+ if (protect)
+ set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
+ else
+ set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));
+
+ preempt_disable();
+ local_flush_tlb_one(addr);
+ preempt_enable();
+
+ return true;
+}
+
+#endif /* _ASM_LOONGARCH_KFENCE_H */
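
A short sketch (not in the patch) of what the helper above does for the generic KFENCE machinery: arming a guard page clears _PAGE_VALID/_PAGE_PRESENT in its PTE, so the next access faults and can be attributed to KFENCE; disarming restores the bits.

	/* Illustrative only; a hypothetical caller toggling one pool page. */
	static void kfence_guard_demo(unsigned long addr)
	{
		kfence_protect_page(addr, true);	/* arm: the next access faults */
		/* ... a use-after-free or OOB access to addr would trap here ... */
		kfence_protect_page(addr, false);	/* disarm: page accessible again */
	}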
diff --git a/arch/loongarch/include/asm/kgdb.h b/arch/loongarch/include/asm/kgdb.h
new file mode 100644
index 000000000000..2041ae58b161
--- /dev/null
+++ b/arch/loongarch/include/asm/kgdb.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_KGDB_H
+#define _ASM_LOONGARCH_KGDB_H
+
+#define GDB_SIZEOF_REG sizeof(u64)
+
+/* The gdb remote protocol expects the following register layout. */
+
+/*
+ * General purpose registers:
+ * r0-r31: 64 bit
+ * orig_a0: 64 bit
+ * pc : 64 bit
+ * csr_badvaddr: 64 bit
+ */
+#define DBG_PT_REGS_BASE 0
+#define DBG_PT_REGS_NUM 35
+#define DBG_PT_REGS_END (DBG_PT_REGS_BASE + DBG_PT_REGS_NUM - 1)
+
+/*
+ * Floating point registers:
+ * f0-f31: 64 bit
+ */
+#define DBG_FPR_BASE (DBG_PT_REGS_END + 1)
+#define DBG_FPR_NUM 32
+#define DBG_FPR_END (DBG_FPR_BASE + DBG_FPR_NUM - 1)
+
+/*
+ * Condition Flag registers:
+ * fcc0-fcc7: 8 bit
+ */
+#define DBG_FCC_BASE (DBG_FPR_END + 1)
+#define DBG_FCC_NUM 8
+#define DBG_FCC_END (DBG_FCC_BASE + DBG_FCC_NUM - 1)
+
+/*
+ * Floating-point Control and Status registers:
+ * fcsr: 32 bit
+ */
+#define DBG_FCSR_NUM 1
+#define DBG_FCSR (DBG_FCC_END + 1)
+
+#define DBG_MAX_REG_NUM (DBG_FCSR + 1)
+
+/*
+ * Size of I/O buffer for gdb packet.
+ * considering to hold all register contents, size is set
+ */
+#define BUFMAX 2048
+
+/*
+ * Number of bytes required for gdb_regs buffer.
+ * PT_REGS and FPR: 8 bytes each; FCSR: 4 bytes; FCC: 1 byte each.
+ * GDB fails to connect for sizes beyond this with the error
+ * "'g' packet reply is too long"
+ */
+#define NUMREGBYTES ((DBG_PT_REGS_NUM + DBG_FPR_NUM) * GDB_SIZEOF_REG + DBG_FCC_NUM * 1 + DBG_FCSR_NUM * 4)
+
+#define BREAK_INSTR_SIZE 4
+#define CACHE_FLUSH_IS_SAFE 0
+
+/* Register numbers of various important registers. */
+enum dbg_loongarch_regnum {
+ DBG_LOONGARCH_ZERO = 0,
+ DBG_LOONGARCH_RA,
+ DBG_LOONGARCH_TP,
+ DBG_LOONGARCH_SP,
+ DBG_LOONGARCH_A0,
+ DBG_LOONGARCH_FP = 22,
+ DBG_LOONGARCH_S0,
+ DBG_LOONGARCH_S1,
+ DBG_LOONGARCH_S2,
+ DBG_LOONGARCH_S3,
+ DBG_LOONGARCH_S4,
+ DBG_LOONGARCH_S5,
+ DBG_LOONGARCH_S6,
+ DBG_LOONGARCH_S7,
+ DBG_LOONGARCH_S8,
+ DBG_LOONGARCH_ORIG_A0,
+ DBG_LOONGARCH_PC,
+ DBG_LOONGARCH_BADV
+};
+
+void kgdb_breakinst(void);
+void arch_kgdb_breakpoint(void);
+
+#ifdef CONFIG_KGDB
+bool kgdb_breakpoint_handler(struct pt_regs *regs);
+#else /* !CONFIG_KGDB */
+static inline bool kgdb_breakpoint_handler(struct pt_regs *regs) { return false; }
+#endif /* CONFIG_KGDB */
+
+#endif /* _ASM_LOONGARCH_KGDB_H */
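
For reference, the buffer sizing above works out as follows (worked example, not part of the patch):

	/*
	 * NUMREGBYTES = (DBG_PT_REGS_NUM + DBG_FPR_NUM) * GDB_SIZEOF_REG
	 *             +  DBG_FCC_NUM * 1 + DBG_FCSR_NUM * 4
	 *             = (35 + 32) * 8 + 8 * 1 + 1 * 4
	 *             = 548 bytes
	 *
	 * which, even hex-encoded (two characters per byte) in a 'g' reply,
	 * stays well below the 2048-byte BUFMAX.
	 */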
diff --git a/arch/loongarch/include/asm/lbt.h b/arch/loongarch/include/asm/lbt.h
new file mode 100644
index 000000000000..e671978bf552
--- /dev/null
+++ b/arch/loongarch/include/asm/lbt.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Qi Hu <huqi@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LBT_H
+#define _ASM_LBT_H
+
+#include <asm/cpu.h>
+#include <asm/current.h>
+#include <asm/loongarch.h>
+#include <asm/processor.h>
+
+extern void _init_lbt(void);
+extern void _save_lbt(struct loongarch_lbt *);
+extern void _restore_lbt(struct loongarch_lbt *);
+
+static inline int is_lbt_enabled(void)
+{
+ if (!cpu_has_lbt)
+ return 0;
+
+ return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LBTEN) ?
+ 1 : 0;
+}
+
+static inline int is_lbt_owner(void)
+{
+ return test_thread_flag(TIF_USEDLBT);
+}
+
+#ifdef CONFIG_CPU_HAS_LBT
+
+static inline void enable_lbt(void)
+{
+ if (cpu_has_lbt)
+ csr_xchg32(CSR_EUEN_LBTEN, CSR_EUEN_LBTEN, LOONGARCH_CSR_EUEN);
+}
+
+static inline void disable_lbt(void)
+{
+ if (cpu_has_lbt)
+ csr_xchg32(0, CSR_EUEN_LBTEN, LOONGARCH_CSR_EUEN);
+}
+
+static inline void __own_lbt(void)
+{
+ enable_lbt();
+ set_thread_flag(TIF_USEDLBT);
+ KSTK_EUEN(current) |= CSR_EUEN_LBTEN;
+}
+
+static inline void own_lbt_inatomic(int restore)
+{
+ if (cpu_has_lbt && !is_lbt_owner()) {
+ __own_lbt();
+ if (restore)
+ _restore_lbt(&current->thread.lbt);
+ }
+}
+
+static inline void own_lbt(int restore)
+{
+ preempt_disable();
+ own_lbt_inatomic(restore);
+ preempt_enable();
+}
+
+static inline void lose_lbt_inatomic(int save, struct task_struct *tsk)
+{
+ if (cpu_has_lbt && is_lbt_owner()) {
+ if (save)
+ _save_lbt(&tsk->thread.lbt);
+
+ disable_lbt();
+ clear_tsk_thread_flag(tsk, TIF_USEDLBT);
+ }
+ KSTK_EUEN(tsk) &= ~(CSR_EUEN_LBTEN);
+}
+
+static inline void lose_lbt(int save)
+{
+ preempt_disable();
+ lose_lbt_inatomic(save, current);
+ preempt_enable();
+}
+
+static inline void init_lbt(void)
+{
+ __own_lbt();
+ _init_lbt();
+}
+#else
+static inline void own_lbt_inatomic(int restore) {}
+static inline void lose_lbt_inatomic(int save, struct task_struct *tsk) {}
+static inline void init_lbt(void) {}
+static inline void lose_lbt(int save) {}
+#endif
+
+static inline int thread_lbt_context_live(void)
+{
+ if (!cpu_has_lbt)
+ return 0;
+
+ return test_thread_flag(TIF_LBT_CTX_LIVE);
+}
+
+#endif /* _ASM_LBT_H */
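
A minimal sketch of the lazy-ownership flow these helpers implement (a hypothetical first-use handler; the real one is added to traps.c by this series): the LBT unit stays disabled until a task actually executes an LBT instruction, at which point the resulting exception adopts the unit for that task and restores any previously saved state.

	/* Hypothetical sketch, using only the helpers declared above. */
	static void lbt_first_use_sketch(void)
	{
		preempt_disable();
		if (thread_lbt_context_live()) {
			/* Task used LBT before: re-enable and reload SCR0-3/eflags. */
			own_lbt_inatomic(1);
		} else {
			/* First use ever: enable and initialise a clean LBT state. */
			init_lbt();
			set_thread_flag(TIF_LBT_CTX_LIVE);
		}
		preempt_enable();
	}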
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 10748a20a2ab..33531d432b49 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -12,49 +12,6 @@
#ifndef __ASSEMBLY__
#include <larchintrin.h>
-/*
- * parse_r var, r - Helper assembler macro for parsing register names.
- *
- * This converts the register name in $n form provided in \r to the
- * corresponding register number, which is assigned to the variable \var. It is
- * needed to allow explicit encoding of instructions in inline assembly where
- * registers are chosen by the compiler in $n form, allowing us to avoid using
- * fixed register numbers.
- *
- * It also allows newer instructions (not implemented by the assembler) to be
- * transparently implemented using assembler macros, instead of needing separate
- * cases depending on toolchain support.
- *
- * Simple usage example:
- * __asm__ __volatile__("parse_r addr, %0\n\t"
- * "#invtlb op, 0, %0\n\t"
- * ".word ((0x6498000) | (addr << 10) | (0 << 5) | op)"
- * : "=r" (status);
- */
-
-/* Match an individual register number and assign to \var */
-#define _IFC_REG(n) \
- ".ifc \\r, $r" #n "\n\t" \
- "\\var = " #n "\n\t" \
- ".endif\n\t"
-
-__asm__(".macro parse_r var r\n\t"
- "\\var = -1\n\t"
- _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3)
- _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7)
- _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11)
- _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15)
- _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19)
- _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23)
- _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27)
- _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31)
- ".iflt \\var\n\t"
- ".error \"Unable to parse register name \\r\"\n\t"
- ".endif\n\t"
- ".endm");
-
-#undef _IFC_REG
-
/* CPUCFG */
#define read_cpucfg(reg) __cpucfg(reg)
@@ -1453,6 +1410,10 @@ __BUILD_CSR_OP(tlbidx)
#define FPU_CSR_RU 0x200 /* towards +Infinity */
#define FPU_CSR_RD 0x300 /* towards -Infinity */
+/* Bit 6 of the FPU Status Register specifies the LBT TOP simulation mode */
+#define FPU_CSR_TM_SHIFT 0x6
+#define FPU_CSR_TM (_ULCAST_(1) << FPU_CSR_TM_SHIFT)
+
#define read_fcsr(source) \
({ \
unsigned int __res; \
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
index fe67d0b4b33d..2b9a90727e19 100644
--- a/arch/loongarch/include/asm/mmzone.h
+++ b/arch/loongarch/include/asm/mmzone.h
@@ -13,6 +13,4 @@ extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[(nid)])
-extern void setup_zero_pages(void);
-
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index 26e8dccb6619..63f137ce82a4 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -84,7 +84,12 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
-#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+
+#define virt_to_page(kaddr) \
+({ \
+ (likely((unsigned long)kaddr < vm_map_base)) ? \
+ dmw_virt_to_page((unsigned long)kaddr) : tlb_virt_to_page((unsigned long)kaddr);\
+})
extern int __virt_addr_valid(volatile void *kaddr);
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
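
virt_to_page() now has to distinguish two classes of kernel addresses: anything below vm_map_base is a DMW (direct-mapped window) address, while page-table-mapped addresses such as the KFENCE pool live above it. A conceptual sketch of the two helpers (their real definitions are added elsewhere in this series; this is only an illustration):

	/* Conceptual sketch only; the in-tree helpers may differ in detail. */
	static inline struct page *dmw_virt_to_page_sketch(unsigned long kaddr)
	{
		/* DMW addresses map linearly onto physical memory. */
		return pfn_to_page(virt_to_pfn(kaddr));
	}

	static inline struct page *tlb_virt_to_page_sketch(unsigned long kaddr)
	{
		/* TLB-mapped addresses (e.g. the KFENCE pool) need a page-table walk. */
		return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
	}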
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 23f5b1107246..79470f0b4f1d 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -94,4 +94,5 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
#endif /* __PAGETABLE_PUD_FOLDED */
+extern pte_t * __init populate_kernel_pte(unsigned long addr);
#endif /* _ASM_PGALLOC_H */
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 06963a172319..29d9b12298bc 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -70,12 +70,9 @@ struct vm_area_struct;
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page;
-extern unsigned long zero_page_mask;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) \
- (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
-#define __HAVE_COLOR_ZERO_PAGE
+#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
/*
* TLB refill handlers may also map the vmalloc area into xkvrange.
@@ -85,14 +82,30 @@ extern unsigned long zero_page_mask;
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END (MODULES_VADDR + SZ_256M)
+#ifdef CONFIG_KFENCE
+#define KFENCE_AREA_SIZE (((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
+#else
+#define KFENCE_AREA_SIZE 0
+#endif
+
#define VMALLOC_START MODULES_END
+
+#ifndef CONFIG_KASAN
#define VMALLOC_END \
(vm_map_base + \
- min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)
+ min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
+#else
+#define VMALLOC_END \
+ (vm_map_base + \
+ min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
+#endif
#define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
+#define KFENCE_AREA_START (VMEMMAP_END + 1)
+#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
+
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
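
To give a feel for the sizes involved: KFENCE_AREA_SIZE depends on the configured object count and the page size. Assuming the generic default of CONFIG_KFENCE_NUM_OBJECTS = 255 and LoongArch's default 16 KiB pages (both assumptions, not stated by this patch):

	/*
	 * Worked example under the assumptions above:
	 *
	 *   KFENCE_AREA_SIZE = ((255 + 1) * 2 + 2) * 16 KiB
	 *                    = 514 * 16 KiB
	 *                    = 8224 KiB  (just over 8 MiB)
	 *
	 * i.e. one object page plus one guard page per object, with two extra
	 * pages of slack, carved out between vmemmap and the KASAN shadow.
	 */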
@@ -350,6 +363,9 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
+struct page *dmw_virt_to_page(unsigned long kaddr);
+struct page *tlb_virt_to_page(unsigned long kaddr);
+
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@@ -596,6 +612,9 @@ static inline long pmd_protnone(pmd_t pmd)
}
#endif /* CONFIG_NUMA_BALANCING */
+#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
+#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
+
/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h
index 636e1c66398c..c3bc44b5f5b3 100644
--- a/arch/loongarch/include/asm/processor.h
+++ b/arch/loongarch/include/asm/processor.h
@@ -80,11 +80,22 @@ BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)
struct loongarch_fpu {
- unsigned int fcsr;
uint64_t fcc; /* 8x8 */
+ uint32_t fcsr;
+ uint32_t ftop;
union fpureg fpr[NUM_FPU_REGS];
};
+struct loongarch_lbt {
+ /* Scratch registers */
+ unsigned long scr0;
+ unsigned long scr1;
+ unsigned long scr2;
+ unsigned long scr3;
+ /* Eflags register */
+ unsigned long eflags;
+};
+
#define INIT_CPUMASK { \
{0,} \
}
@@ -113,15 +124,6 @@ struct thread_struct {
unsigned long csr_ecfg;
unsigned long csr_badvaddr; /* Last user fault */
- /* Scratch registers */
- unsigned long scr0;
- unsigned long scr1;
- unsigned long scr2;
- unsigned long scr3;
-
- /* Eflags register */
- unsigned long eflags;
-
/* Other stuff associated with the thread. */
unsigned long trap_nr;
unsigned long error_code;
@@ -133,6 +135,7 @@ struct thread_struct {
* context because they are conditionally copied at fork().
*/
struct loongarch_fpu fpu FPU_ALIGN;
+ struct loongarch_lbt lbt; /* Also conditionally copied */
/* Hardware breakpoints pinned to this task. */
struct perf_event *hbp_break[LOONGARCH_MAX_BRP];
@@ -174,8 +177,9 @@ struct thread_struct {
* FPU & vector registers \
*/ \
.fpu = { \
- .fcsr = 0, \
.fcc = 0, \
+ .fcsr = 0, \
+ .ftop = 0, \
.fpr = {{{0,},},}, \
}, \
.hbp_break = {0}, \
diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
index be05c0e706a2..a0bc159ce8bd 100644
--- a/arch/loongarch/include/asm/setup.h
+++ b/arch/loongarch/include/asm/setup.h
@@ -7,6 +7,7 @@
#define _LOONGARCH_SETUP_H
#include <linux/types.h>
+#include <asm/sections.h>
#include <uapi/asm/setup.h>
#define VECSIZE 0x200
@@ -33,8 +34,13 @@ extern long __la_abs_end;
extern long __rela_dyn_begin;
extern long __rela_dyn_end;
-extern void * __init relocate_kernel(void);
+extern unsigned long __init relocate_kernel(void);
#endif
+static inline unsigned long kaslr_offset(void)
+{
+ return (unsigned long)&_text - VMLINUX_LOAD_ADDRESS;
+}
+
#endif /* __SETUP_H */
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 7df80e6ae9d2..4fb1e6408b98 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -158,6 +158,10 @@
cfi_st u0, PT_R21, \docfi
csrrd u0, PERCPU_BASE_KS
9:
+#ifdef CONFIG_KGDB
+ li.w t0, CSR_CRMD_WE
+ csrxchg t0, t0, LOONGARCH_CSR_CRMD
+#endif
.endm
.macro SAVE_ALL docfi=0
diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
index 7b29cc9c70aa..5bb5a90d2681 100644
--- a/arch/loongarch/include/asm/string.h
+++ b/arch/loongarch/include/asm/string.h
@@ -7,11 +7,31 @@
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#define memset(s, c, n) __memset(s, c, n)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
#endif /* _ASM_STRING_H */
diff --git a/arch/loongarch/include/asm/switch_to.h b/arch/loongarch/include/asm/switch_to.h
index 24e3094bebab..5b225aff3ba2 100644
--- a/arch/loongarch/include/asm/switch_to.h
+++ b/arch/loongarch/include/asm/switch_to.h
@@ -7,6 +7,7 @@
#include <asm/cpu-features.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
struct task_struct;
@@ -34,6 +35,7 @@ extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
#define switch_to(prev, next, last) \
do { \
lose_fpu_inatomic(1, prev); \
+ lose_lbt_inatomic(1, prev); \
hw_breakpoint_thread_switch(next); \
(last) = __switch_to(prev, next, task_thread_info(next), \
__builtin_return_address(0), __builtin_frame_address(0)); \
diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h
index 1a3354ca056e..8cb653d49a54 100644
--- a/arch/loongarch/include/asm/thread_info.h
+++ b/arch/loongarch/include/asm/thread_info.h
@@ -84,6 +84,8 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define TIF_SINGLESTEP 16 /* Single Step */
#define TIF_LSX_CTX_LIVE 17 /* LSX context must be preserved */
#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */
+#define TIF_USEDLBT 19 /* LBT was used by this task this quantum (SMP) */
+#define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -101,6 +103,8 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_LSX_CTX_LIVE (1<<TIF_LSX_CTX_LIVE)
#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE)
+#define _TIF_USEDLBT (1<<TIF_USEDLBT)
+#define _TIF_LBT_CTX_LIVE (1<<TIF_LBT_CTX_LIVE)
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/loongarch/include/asm/xor.h b/arch/loongarch/include/asm/xor.h
new file mode 100644
index 000000000000..12467fffee46
--- /dev/null
+++ b/arch/loongarch/include/asm/xor.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ */
+#ifndef _ASM_LOONGARCH_XOR_H
+#define _ASM_LOONGARCH_XOR_H
+
+#include <asm/cpu-features.h>
+#include <asm/xor_simd.h>
+
+#ifdef CONFIG_CPU_HAS_LSX
+static struct xor_block_template xor_block_lsx = {
+ .name = "lsx",
+ .do_2 = xor_lsx_2,
+ .do_3 = xor_lsx_3,
+ .do_4 = xor_lsx_4,
+ .do_5 = xor_lsx_5,
+};
+
+#define XOR_SPEED_LSX() \
+ do { \
+ if (cpu_has_lsx) \
+ xor_speed(&xor_block_lsx); \
+ } while (0)
+#else /* CONFIG_CPU_HAS_LSX */
+#define XOR_SPEED_LSX()
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+static struct xor_block_template xor_block_lasx = {
+ .name = "lasx",
+ .do_2 = xor_lasx_2,
+ .do_3 = xor_lasx_3,
+ .do_4 = xor_lasx_4,
+ .do_5 = xor_lasx_5,
+};
+
+#define XOR_SPEED_LASX() \
+ do { \
+ if (cpu_has_lasx) \
+ xor_speed(&xor_block_lasx); \
+ } while (0)
+#else /* CONFIG_CPU_HAS_LASX */
+#define XOR_SPEED_LASX()
+#endif /* CONFIG_CPU_HAS_LASX */
+
+/*
+ * For grins, also test the generic routines.
+ *
+ * More importantly: it cannot be ruled out at this point in time that some
+ * future (maybe reduced) models could run the vector algorithms slower than
+ * the scalar ones, maybe for errata or micro-op reasons. It may be
+ * appropriate to revisit this after one or two more uarch generations.
+ */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ XOR_SPEED_LSX(); \
+ XOR_SPEED_LASX(); \
+} while (0)
+
+#endif /* _ASM_LOONGARCH_XOR_H */
diff --git a/arch/loongarch/include/asm/xor_simd.h b/arch/loongarch/include/asm/xor_simd.h
new file mode 100644
index 000000000000..471b96332f38
--- /dev/null
+++ b/arch/loongarch/include/asm/xor_simd.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ */
+#ifndef _ASM_LOONGARCH_XOR_SIMD_H
+#define _ASM_LOONGARCH_XOR_SIMD_H
+
+#ifdef CONFIG_CPU_HAS_LSX
+void xor_lsx_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void xor_lsx_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3);
+void xor_lsx_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void xor_lsx_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4, const unsigned long * __restrict p5);
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+void xor_lasx_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void xor_lasx_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3);
+void xor_lasx_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void xor_lasx_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4, const unsigned long * __restrict p5);
+#endif /* CONFIG_CPU_HAS_LASX */
+
+#endif /* _ASM_LOONGARCH_XOR_SIMD_H */
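
The prototypes follow the kernel's xor block convention: bytes is the length of every buffer and the result is accumulated into p1. A hypothetical caller (not part of the patch), with a scalar fallback for CPUs without LSX; the buffer length is assumed to be a multiple of the vector width:

	#include <asm/cpu-features.h>
	#include <asm/xor_simd.h>

	/* Hypothetical helper: dst ^= src over 'bytes' bytes. */
	static void xor_buf_sketch(unsigned long *dst, const unsigned long *src,
				   unsigned long bytes)
	{
		unsigned long i;

	#ifdef CONFIG_CPU_HAS_LSX
		if (cpu_has_lsx) {
			xor_lsx_2(bytes, dst, src);
			return;
		}
	#endif
		for (i = 0; i < bytes / sizeof(unsigned long); i++)
			dst[i] ^= src[i];
	}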
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
index 06e3be52cb04..ac915f841650 100644
--- a/arch/loongarch/include/uapi/asm/ptrace.h
+++ b/arch/loongarch/include/uapi/asm/ptrace.h
@@ -56,6 +56,12 @@ struct user_lasx_state {
uint64_t vregs[32*4];
};
+struct user_lbt_state {
+ uint64_t scr[4];
+ uint32_t eflags;
+ uint32_t ftop;
+};
+
struct user_watch_state {
uint64_t dbg_info;
struct {
diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h
index 4cd7d16f7037..6c22f616b8f1 100644
--- a/arch/loongarch/include/uapi/asm/sigcontext.h
+++ b/arch/loongarch/include/uapi/asm/sigcontext.h
@@ -59,4 +59,14 @@ struct lasx_context {
__u32 fcsr;
};
+/* LBT context */
+#define LBT_CTX_MAGIC 0x42540001
+#define LBT_CTX_ALIGN 8
+struct lbt_context {
+ __u64 regs[4];
+ __u32 eflags;
+ __u32 ftop;
+};
+
+
#endif /* _UAPI_ASM_SIGCONTEXT_H */
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 8e279f04f9e7..c56ea0b75448 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_EFI) += efi.o
obj-$(CONFIG_CPU_HAS_FPU) += fpu.o kfpu.o
+obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
+
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
ifdef CONFIG_FUNCTION_TRACER
@@ -32,6 +34,12 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE)
endif
+KASAN_SANITIZE_efi.o := n
+KASAN_SANITIZE_cpu-probe.o := n
+KASAN_SANITIZE_traps.o := n
+KASAN_SANITIZE_smp.o := n
+KASAN_SANITIZE_vdso.o := n
+
obj-$(CONFIG_MODULES) += module.o module-sections.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -54,6 +62,7 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o
obj-$(CONFIG_UPROBES) += uprobes.o
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
index 505e4bf59603..8da0726777ed 100644
--- a/arch/loongarch/kernel/asm-offsets.c
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -118,13 +118,6 @@ void output_thread_defines(void)
OFFSET(THREAD_CSRECFG, task_struct,
thread.csr_ecfg);
- OFFSET(THREAD_SCR0, task_struct, thread.scr0);
- OFFSET(THREAD_SCR1, task_struct, thread.scr1);
- OFFSET(THREAD_SCR2, task_struct, thread.scr2);
- OFFSET(THREAD_SCR3, task_struct, thread.scr3);
-
- OFFSET(THREAD_EFLAGS, task_struct, thread.eflags);
-
OFFSET(THREAD_FPU, task_struct, thread.fpu);
OFFSET(THREAD_BVADDR, task_struct, \
@@ -172,6 +165,17 @@ void output_thread_fpu_defines(void)
OFFSET(THREAD_FCSR, loongarch_fpu, fcsr);
OFFSET(THREAD_FCC, loongarch_fpu, fcc);
+ OFFSET(THREAD_FTOP, loongarch_fpu, ftop);
+ BLANK();
+}
+
+void output_thread_lbt_defines(void)
+{
+ OFFSET(THREAD_SCR0, loongarch_lbt, scr0);
+ OFFSET(THREAD_SCR1, loongarch_lbt, scr1);
+ OFFSET(THREAD_SCR2, loongarch_lbt, scr2);
+ OFFSET(THREAD_SCR3, loongarch_lbt, scr3);
+ OFFSET(THREAD_EFLAGS, loongarch_lbt, eflags);
BLANK();
}
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index e925579c7a71..55320813ee08 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -144,6 +144,20 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_LVZ;
elf_hwcap |= HWCAP_LOONGARCH_LVZ;
}
+#ifdef CONFIG_CPU_HAS_LBT
+ if (config & CPUCFG2_X86BT) {
+ c->options |= LOONGARCH_CPU_LBT_X86;
+ elf_hwcap |= HWCAP_LOONGARCH_LBT_X86;
+ }
+ if (config & CPUCFG2_ARMBT) {
+ c->options |= LOONGARCH_CPU_LBT_ARM;
+ elf_hwcap |= HWCAP_LOONGARCH_LBT_ARM;
+ }
+ if (config & CPUCFG2_MIPSBT) {
+ c->options |= LOONGARCH_CPU_LBT_MIPS;
+ elf_hwcap |= HWCAP_LOONGARCH_LBT_MIPS;
+ }
+#endif
config = read_cpucfg(LOONGARCH_CPUCFG6);
if (config & CPUCFG6_PMP)
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index d737e3cf42d3..65518bb8f472 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -58,6 +58,11 @@ SYM_FUNC_START(handle_syscall)
SAVE_STATIC
+#ifdef CONFIG_KGDB
+ li.w t1, CSR_CRMD_WE
+ csrxchg t1, t1, LOONGARCH_CSR_CRMD
+#endif
+
move u0, t0
li.d tp, ~_THREAD_MASK
and tp, tp, sp
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index 501094a09f5d..d53ab10f4644 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -22,7 +22,7 @@
.macro EX insn, reg, src, offs
.ex\@: \insn \reg, \src, \offs
- _asm_extable .ex\@, fault
+ _asm_extable .ex\@, .L_fpu_fault
.endm
.macro sc_save_fp base
@@ -138,6 +138,13 @@
.macro sc_save_fcsr base, tmp0
movfcsr2gr \tmp0, fcsr0
EX st.w \tmp0, \base, 0
+#if defined(CONFIG_CPU_HAS_LBT)
+ /* TM bit is always 0 if LBT is not supported */
+ andi \tmp0, \tmp0, FPU_CSR_TM
+ beqz \tmp0, 1f
+ x86clrtm
+1:
+#endif
.endm
.macro sc_restore_fcsr base, tmp0
@@ -309,7 +316,7 @@ EXPORT_SYMBOL(_save_fp)
*/
SYM_FUNC_START(_restore_fp)
fpu_restore_double a0 t1 # clobbers t1
- fpu_restore_csr a0 t1
+ fpu_restore_csr a0 t1 t2
fpu_restore_cc a0 t1 t2 # clobbers t1, t2
jr ra
SYM_FUNC_END(_restore_fp)
@@ -514,7 +521,6 @@ SYM_FUNC_START(_restore_lasx_context)
jr ra
SYM_FUNC_END(_restore_lasx_context)
-SYM_FUNC_START(fault)
+.L_fpu_fault:
li.w a0, -EFAULT # failure
jr ra
-SYM_FUNC_END(fault)
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 5e828a8bc0a0..53b883db0786 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -95,12 +95,17 @@ SYM_CODE_START(kernel_entry) # kernel entry point
PTR_LI sp, (_THREAD_SIZE - PT_SIZE)
PTR_ADD sp, sp, tp
set_saved_sp sp, t0, t1
-#endif
- /* relocate_kernel() returns the new kernel entry point */
- jr a0
- ASM_BUG()
+ /* Jump to the new kernel: new_pc = current_pc + random_offset */
+ pcaddi t0, 0
+ add.d t0, t0, a0
+ jirl zero, t0, 0xc
+#endif /* CONFIG_RANDOMIZE_BASE */
+
+#endif /* CONFIG_RELOCATABLE */
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
#endif
bl start_kernel
diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c
index 5c46ae8c6cac..ec5b28e570c9 100644
--- a/arch/loongarch/kernel/kfpu.c
+++ b/arch/loongarch/kernel/kfpu.c
@@ -8,19 +8,40 @@
#include <asm/fpu.h>
#include <asm/smp.h>
+static unsigned int euen_mask = CSR_EUEN_FPEN;
+
+/*
+ * The critical section between kernel_fpu_begin() and kernel_fpu_end()
+ * is non-reentrant. It is the caller's responsibility to avoid reentrance.
+ * See drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c as an example.
+ */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
+static DEFINE_PER_CPU(unsigned int, euen_current);
void kernel_fpu_begin(void)
{
+ unsigned int *euen_curr;
+
preempt_disable();
WARN_ON(this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, true);
+ euen_curr = this_cpu_ptr(&euen_current);
- if (!is_fpu_owner())
- enable_fpu();
+ *euen_curr = csr_xchg32(euen_mask, euen_mask, LOONGARCH_CSR_EUEN);
+
+#ifdef CONFIG_CPU_HAS_LASX
+ if (*euen_curr & CSR_EUEN_LASXEN)
+ _save_lasx(&current->thread.fpu);
+ else
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ if (*euen_curr & CSR_EUEN_LSXEN)
+ _save_lsx(&current->thread.fpu);
else
+#endif
+ if (*euen_curr & CSR_EUEN_FPEN)
_save_fp(&current->thread.fpu);
write_fcsr(LOONGARCH_FCSR0, 0);
@@ -29,15 +50,41 @@ EXPORT_SYMBOL_GPL(kernel_fpu_begin);
void kernel_fpu_end(void)
{
+ unsigned int *euen_curr;
+
WARN_ON(!this_cpu_read(in_kernel_fpu));
- if (!is_fpu_owner())
- disable_fpu();
+ euen_curr = this_cpu_ptr(&euen_current);
+
+#ifdef CONFIG_CPU_HAS_LASX
+ if (*euen_curr & CSR_EUEN_LASXEN)
+ _restore_lasx(&current->thread.fpu);
else
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ if (*euen_curr & CSR_EUEN_LSXEN)
+ _restore_lsx(&current->thread.fpu);
+ else
+#endif
+ if (*euen_curr & CSR_EUEN_FPEN)
_restore_fp(&current->thread.fpu);
+ *euen_curr = csr_xchg32(*euen_curr, euen_mask, LOONGARCH_CSR_EUEN);
+
this_cpu_write(in_kernel_fpu, false);
preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
+
+static int __init init_euen_mask(void)
+{
+ if (cpu_has_lsx)
+ euen_mask |= CSR_EUEN_LSXEN;
+
+ if (cpu_has_lasx)
+ euen_mask |= CSR_EUEN_LASXEN;
+
+ return 0;
+}
+arch_initcall(init_euen_mask);
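
As the comment in kfpu.c notes, the kernel_fpu_begin()/kernel_fpu_end() critical section is non-reentrant, so callers must bracket their SIMD work with exactly one begin/end pair and must not nest them. A minimal sketch of the expected calling pattern follows; the function and the work inside it are illustrative only, and the declarations are assumed to come from <asm/fpu.h>.

#include <asm/fpu.h>	/* kernel_fpu_begin()/kernel_fpu_end(), assumed declared here */

/* Illustrative only: bracket FP/SIMD-clobbering work in kernel context. */
static void example_simd_user(void)
{
	kernel_fpu_begin();	/* disables preemption, saves any live FP/LSX/LASX state */

	/* ... code that clobbers FP or vector registers ... */

	kernel_fpu_end();	/* restores the saved state, re-enables preemption */
}
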
diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c
new file mode 100644
index 000000000000..445c452d72a7
--- /dev/null
+++ b/arch/loongarch/kernel/kgdb.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LoongArch KGDB support
+ *
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/hw_breakpoint.h>
+#include <linux/kdebug.h>
+#include <linux/kgdb.h>
+#include <linux/processor.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fpu.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/inst.h>
+#include <asm/irq_regs.h>
+#include <asm/ptrace.h>
+#include <asm/sigcontext.h>
+
+int kgdb_watch_activated;
+static unsigned int stepped_opcode;
+static unsigned long stepped_address;
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+ { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+ { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+ { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+ { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+ { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+ { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+ { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+ { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+ { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+ { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+ { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+ { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+ { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+ { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+ { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+ { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
+ { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
+ { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
+ { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
+ { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
+ { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
+ { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
+ { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
+ { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
+ { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
+ { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
+ { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
+ { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
+ { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
+ { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
+ { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
+ { "orig_a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, orig_a0) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_era) },
+ { "badv", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_badvaddr) },
+ { "f0", GDB_SIZEOF_REG, 0 },
+ { "f1", GDB_SIZEOF_REG, 1 },
+ { "f2", GDB_SIZEOF_REG, 2 },
+ { "f3", GDB_SIZEOF_REG, 3 },
+ { "f4", GDB_SIZEOF_REG, 4 },
+ { "f5", GDB_SIZEOF_REG, 5 },
+ { "f6", GDB_SIZEOF_REG, 6 },
+ { "f7", GDB_SIZEOF_REG, 7 },
+ { "f8", GDB_SIZEOF_REG, 8 },
+ { "f9", GDB_SIZEOF_REG, 9 },
+ { "f10", GDB_SIZEOF_REG, 10 },
+ { "f11", GDB_SIZEOF_REG, 11 },
+ { "f12", GDB_SIZEOF_REG, 12 },
+ { "f13", GDB_SIZEOF_REG, 13 },
+ { "f14", GDB_SIZEOF_REG, 14 },
+ { "f15", GDB_SIZEOF_REG, 15 },
+ { "f16", GDB_SIZEOF_REG, 16 },
+ { "f17", GDB_SIZEOF_REG, 17 },
+ { "f18", GDB_SIZEOF_REG, 18 },
+ { "f19", GDB_SIZEOF_REG, 19 },
+ { "f20", GDB_SIZEOF_REG, 20 },
+ { "f21", GDB_SIZEOF_REG, 21 },
+ { "f22", GDB_SIZEOF_REG, 22 },
+ { "f23", GDB_SIZEOF_REG, 23 },
+ { "f24", GDB_SIZEOF_REG, 24 },
+ { "f25", GDB_SIZEOF_REG, 25 },
+ { "f26", GDB_SIZEOF_REG, 26 },
+ { "f27", GDB_SIZEOF_REG, 27 },
+ { "f28", GDB_SIZEOF_REG, 28 },
+ { "f29", GDB_SIZEOF_REG, 29 },
+ { "f30", GDB_SIZEOF_REG, 30 },
+ { "f31", GDB_SIZEOF_REG, 31 },
+ { "fcc0", 1, 0 },
+ { "fcc1", 1, 1 },
+ { "fcc2", 1, 2 },
+ { "fcc3", 1, 3 },
+ { "fcc4", 1, 4 },
+ { "fcc5", 1, 5 },
+ { "fcc6", 1, 6 },
+ { "fcc7", 1, 7 },
+ { "fcsr", 4, 0 },
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int reg_offset, reg_size;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return NULL;
+
+ reg_offset = dbg_reg_def[regno].offset;
+ reg_size = dbg_reg_def[regno].size;
+
+ if (reg_offset == -1)
+ goto out;
+
+ /* Handle general-purpose/orig_a0/pc/badv registers */
+ if (regno <= DBG_PT_REGS_END) {
+ memcpy(mem, (void *)regs + reg_offset, reg_size);
+ goto out;
+ }
+
+ if (!(regs->csr_euen & CSR_EUEN_FPEN))
+ goto out;
+
+ save_fp(current);
+
+ /* Handle FP registers */
+ switch (regno) {
+ case DBG_FCSR: /* Process the fcsr */
+ memcpy(mem, (void *)&current->thread.fpu.fcsr, reg_size);
+ break;
+ case DBG_FCC_BASE ... DBG_FCC_END: /* Process the fcc */
+ memcpy(mem, (void *)&current->thread.fpu.fcc + reg_offset, reg_size);
+ break;
+ case DBG_FPR_BASE ... DBG_FPR_END: /* Process the fpr */
+ memcpy(mem, (void *)&current->thread.fpu.fpr[reg_offset], reg_size);
+ break;
+ default:
+ break;
+ }
+
+out:
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int reg_offset, reg_size;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return -EINVAL;
+
+ reg_offset = dbg_reg_def[regno].offset;
+ reg_size = dbg_reg_def[regno].size;
+
+ if (reg_offset == -1)
+ return 0;
+
+ /* Handle general-purpose/orig_a0/pc/badv registers */
+ if (regno <= DBG_PT_REGS_END) {
+ memcpy((void *)regs + reg_offset, mem, reg_size);
+ return 0;
+ }
+
+ if (!(regs->csr_euen & CSR_EUEN_FPEN))
+ return 0;
+
+ /* Handle FP registers */
+ switch (regno) {
+ case DBG_FCSR: /* Process the fcsr */
+ memcpy((void *)&current->thread.fpu.fcsr, mem, reg_size);
+ break;
+ case DBG_FCC_BASE ... DBG_FCC_END: /* Process the fcc */
+ memcpy((void *)&current->thread.fpu.fcc + reg_offset, mem, reg_size);
+ break;
+ case DBG_FPR_BASE ... DBG_FPR_END: /* Process the fpr */
+ memcpy((void *)&current->thread.fpu.fpr[reg_offset], mem, reg_size);
+ break;
+ default:
+ break;
+ }
+
+ restore_fp(current);
+
+ return 0;
+}
+
+/*
+ * Similar to regs_to_gdb_regs() except that the process is sleeping, so
+ * we may not be able to get all the info.
+ */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ /* Initialize to zero */
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+
+ gdb_regs[DBG_LOONGARCH_RA] = p->thread.reg01;
+ gdb_regs[DBG_LOONGARCH_TP] = (long)p;
+ gdb_regs[DBG_LOONGARCH_SP] = p->thread.reg03;
+
+ /* S0 - S8 */
+ gdb_regs[DBG_LOONGARCH_S0] = p->thread.reg23;
+ gdb_regs[DBG_LOONGARCH_S1] = p->thread.reg24;
+ gdb_regs[DBG_LOONGARCH_S2] = p->thread.reg25;
+ gdb_regs[DBG_LOONGARCH_S3] = p->thread.reg26;
+ gdb_regs[DBG_LOONGARCH_S4] = p->thread.reg27;
+ gdb_regs[DBG_LOONGARCH_S5] = p->thread.reg28;
+ gdb_regs[DBG_LOONGARCH_S6] = p->thread.reg29;
+ gdb_regs[DBG_LOONGARCH_S7] = p->thread.reg30;
+ gdb_regs[DBG_LOONGARCH_S8] = p->thread.reg31;
+
+ /*
+ * PC uses the return address (RA), i.e. the moment right after returning from __switch_to()
+ */
+ gdb_regs[DBG_LOONGARCH_PC] = p->thread.reg01;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->csr_era = pc;
+}
+
+void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__ ( \
+ ".globl kgdb_breakinst\n\t" \
+ "nop\n" \
+ "kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
+}
+
+/*
+ * Called via the die notifier chain before the kernel dies. If KGDB is
+ * enabled, try to fall into the debugger.
+ */
+static int kgdb_loongarch_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ if (atomic_read(&kgdb_active) != -1)
+ kgdb_nmicallback(smp_processor_id(), regs);
+
+ if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
+ return NOTIFY_DONE;
+
+ if (atomic_read(&kgdb_setting_breakpoint))
+ if (regs->csr_era == (unsigned long)&kgdb_breakinst)
+ regs->csr_era += LOONGARCH_INSN_SIZE;
+
+ return NOTIFY_STOP;
+}
+
+bool kgdb_breakpoint_handler(struct pt_regs *regs)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = "Break",
+ .err = BRK_KDB,
+ .trapnr = read_csr_excode(),
+ .signr = SIGTRAP,
+ };
+
+ return (kgdb_loongarch_notify(NULL, DIE_TRAP, &args) == NOTIFY_STOP) ? true : false;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_loongarch_notify,
+};
+
+static inline void kgdb_arch_update_addr(struct pt_regs *regs,
+ char *remcom_in_buffer)
+{
+ unsigned long addr;
+ char *ptr;
+
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->csr_era = addr;
+}
+
+/* Calculate the new PC after a single step */
+static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+{
+ char cj_val;
+ unsigned int si, si_l, si_h, rd, rj, cj;
+ unsigned long pc = instruction_pointer(regs);
+ union loongarch_instruction *ip = (union loongarch_instruction *)pc;
+
+ if (pc & 3) {
+ pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+ return -EINVAL;
+ }
+
+ *next_addr = pc + LOONGARCH_INSN_SIZE;
+
+ si_h = ip->reg0i26_format.immediate_h;
+ si_l = ip->reg0i26_format.immediate_l;
+ switch (ip->reg0i26_format.opcode) {
+ case b_op:
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
+ return 0;
+ case bl_op:
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
+ regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
+ return 0;
+ }
+
+ rj = ip->reg1i21_format.rj;
+ cj = (rj & 0x07) + DBG_FCC_BASE;
+ si_l = ip->reg1i21_format.immediate_l;
+ si_h = ip->reg1i21_format.immediate_h;
+ dbg_get_reg(cj, &cj_val, regs);
+ switch (ip->reg1i21_format.opcode) {
+ case beqz_op:
+ if (regs->regs[rj] == 0)
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ return 0;
+ case bnez_op:
+ if (regs->regs[rj] != 0)
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ return 0;
+ case bceqz_op: /* bceqz_op = bcnez_op */
+ if (((rj & 0x18) == 0x00) && !cj_val) /* bceqz */
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ if (((rj & 0x18) == 0x08) && cj_val) /* bcnez */
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ return 0;
+ }
+
+ rj = ip->reg2i16_format.rj;
+ rd = ip->reg2i16_format.rd;
+ si = ip->reg2i16_format.immediate;
+ switch (ip->reg2i16_format.opcode) {
+ case beq_op:
+ if (regs->regs[rj] == regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bne_op:
+ if (regs->regs[rj] != regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case blt_op:
+ if ((long)regs->regs[rj] < (long)regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bge_op:
+ if ((long)regs->regs[rj] >= (long)regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bltu_op:
+ if (regs->regs[rj] < regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bgeu_op:
+ if (regs->regs[rj] >= regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case jirl_op:
+ regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
+ *next_addr = regs->regs[rj] + sign_extend64(si << 2, 17);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int do_single_step(struct pt_regs *regs)
+{
+ int error = 0;
+ unsigned long addr = 0; /* Where execution will continue after the stepped instruction */
+
+ error = get_step_address(regs, &addr);
+ if (error)
+ return error;
+
+ /* Store the opcode in the stepped address */
+ error = get_kernel_nofault(stepped_opcode, (void *)addr);
+ if (error)
+ return error;
+
+ stepped_address = addr;
+
+ /* Replace the opcode with the break instruction */
+ error = copy_to_kernel_nofault((void *)stepped_address,
+ arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+ flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+
+ if (error) {
+ stepped_opcode = 0;
+ stepped_address = 0;
+ } else {
+ kgdb_single_step = 1;
+ atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
+ }
+
+ return error;
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (stepped_opcode) {
+ copy_to_kernel_nofault((void *)stepped_address,
+ (void *)&stepped_opcode, BREAK_INSTR_SIZE);
+ flush_icache_range(stepped_address, stepped_address + BREAK_INSTR_SIZE);
+ }
+
+ stepped_opcode = 0;
+ stepped_address = 0;
+ kgdb_single_step = 0;
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+}
+
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ int ret = 0;
+
+ undo_single_step(regs);
+ regs->csr_prmd |= CSR_PRMD_PWE;
+
+ switch (remcom_in_buffer[0]) {
+ case 'D':
+ case 'k':
+ regs->csr_prmd &= ~CSR_PRMD_PWE;
+ fallthrough;
+ case 'c':
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ break;
+ case 's':
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ ret = do_single_step(regs);
+ break;
+ default:
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static struct hw_breakpoint {
+ unsigned int enabled;
+ unsigned long addr;
+ int len;
+ int type;
+ struct perf_event * __percpu *pev;
+} breakinfo[LOONGARCH_MAX_BRP];
+
+static int hw_break_reserve_slot(int breakno)
+{
+ int cpu, cnt = 0;
+ struct perf_event **pevent;
+
+ for_each_online_cpu(cpu) {
+ cnt++;
+ pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+ if (dbg_reserve_bp_slot(*pevent))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ for_each_online_cpu(cpu) {
+ cnt--;
+ if (!cnt)
+ break;
+ pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+ dbg_release_bp_slot(*pevent);
+ }
+
+ return -1;
+}
+
+static int hw_break_release_slot(int breakno)
+{
+ int cpu;
+ struct perf_event **pevent;
+
+ if (dbg_is_early)
+ return 0;
+
+ for_each_online_cpu(cpu) {
+ pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+ if (dbg_release_bp_slot(*pevent))
+ /*
+ * The debugger is responsible for handling the retry on
+ * remove failure.
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
+{
+ int i;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++)
+ if (!breakinfo[i].enabled)
+ break;
+
+ if (i == LOONGARCH_MAX_BRP)
+ return -1;
+
+ switch (bptype) {
+ case BP_HARDWARE_BREAKPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_X;
+ break;
+ case BP_READ_WATCHPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_R;
+ break;
+ case BP_WRITE_WATCHPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_W;
+ break;
+ case BP_ACCESS_WATCHPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_RW;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (len) {
+ case 1:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_1;
+ break;
+ case 2:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_2;
+ break;
+ case 4:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_4;
+ break;
+ case 8:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -1;
+ }
+
+ breakinfo[i].addr = addr;
+ if (hw_break_reserve_slot(i)) {
+ breakinfo[i].addr = 0;
+ return -1;
+ }
+ breakinfo[i].enabled = 1;
+
+ return 0;
+}
+
+static int kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
+{
+ int i;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++)
+ if (breakinfo[i].addr == addr && breakinfo[i].enabled)
+ break;
+
+ if (i == LOONGARCH_MAX_BRP)
+ return -1;
+
+ if (hw_break_release_slot(i)) {
+ pr_err("Cannot remove hw breakpoint at %lx\n", addr);
+ return -1;
+ }
+ breakinfo[i].enabled = 0;
+
+ return 0;
+}
+
+static void kgdb_disable_hw_break(struct pt_regs *regs)
+{
+ int i;
+ int cpu = raw_smp_processor_id();
+ struct perf_event *bp;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (!breakinfo[i].enabled)
+ continue;
+
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (bp->attr.disabled == 1)
+ continue;
+
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
+ }
+
+ /* Disable hardware debugging while we are in kgdb */
+ csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
+}
+
+static void kgdb_remove_all_hw_break(void)
+{
+ int i;
+ int cpu = raw_smp_processor_id();
+ struct perf_event *bp;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (!breakinfo[i].enabled)
+ continue;
+
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (!bp->attr.disabled) {
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
+ continue;
+ }
+
+ if (hw_break_release_slot(i))
+ pr_err("KGDB: hw bpt remove failed %lx\n", breakinfo[i].addr);
+ breakinfo[i].enabled = 0;
+ }
+
+ csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
+ kgdb_watch_activated = 0;
+}
+
+static void kgdb_correct_hw_break(void)
+{
+ int i, activated = 0;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ struct perf_event *bp;
+ int val;
+ int cpu = raw_smp_processor_id();
+
+ if (!breakinfo[i].enabled)
+ continue;
+
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (bp->attr.disabled != 1)
+ continue;
+
+ bp->attr.bp_addr = breakinfo[i].addr;
+ bp->attr.bp_len = breakinfo[i].len;
+ bp->attr.bp_type = breakinfo[i].type;
+
+ val = hw_breakpoint_arch_parse(bp, &bp->attr, counter_arch_bp(bp));
+ if (val)
+ return;
+
+ val = arch_install_hw_breakpoint(bp);
+ if (!val)
+ bp->attr.disabled = 0;
+ activated = 1;
+ }
+
+ csr_xchg32(activated ? CSR_CRMD_WE : 0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
+ kgdb_watch_activated = activated;
+}
+
+const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x02, 0x00, break_op >> 1, 0x00}, /* BRK_KDB = 2 */
+ .flags = KGDB_HW_BREAKPOINT,
+ .set_hw_breakpoint = kgdb_set_hw_break,
+ .remove_hw_breakpoint = kgdb_remove_hw_break,
+ .disable_hw_break = kgdb_disable_hw_break,
+ .remove_all_hw_break = kgdb_remove_all_hw_break,
+ .correct_hw_break = kgdb_correct_hw_break,
+};
+
+int kgdb_arch_init(void)
+{
+ return register_die_notifier(&kgdb_notifier);
+}
+
+void kgdb_arch_late(void)
+{
+ int i, cpu;
+ struct perf_event_attr attr;
+ struct perf_event **pevent;
+
+ hw_breakpoint_init(&attr);
+
+ attr.bp_addr = (unsigned long)kgdb_arch_init;
+ attr.bp_len = HW_BREAKPOINT_LEN_4;
+ attr.bp_type = HW_BREAKPOINT_W;
+ attr.disabled = 1;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (breakinfo[i].pev)
+ continue;
+
+ breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
+ if (IS_ERR((void * __force)breakinfo[i].pev)) {
+ pr_err("kgdb: Could not allocate hw breakpoints.\n");
+ breakinfo[i].pev = NULL;
+ return;
+ }
+
+ for_each_online_cpu(cpu) {
+ pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (pevent[0]->destroy) {
+ pevent[0]->destroy = NULL;
+ release_bp_slot(*pevent);
+ }
+ }
+ }
+}
+
+void kgdb_arch_exit(void)
+{
+ int i;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (breakinfo[i].pev) {
+ unregister_wide_hw_breakpoint(breakinfo[i].pev);
+ breakinfo[i].pev = NULL;
+ }
+ }
+
+ unregister_die_notifier(&kgdb_notifier);
+}
diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S
new file mode 100644
index 000000000000..9c75120a26d8
--- /dev/null
+++ b/arch/loongarch/kernel/lbt.S
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Qi Hu <huqi@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-extable.h>
+#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/regdef.h>
+
+#define SCR_REG_WIDTH 8
+
+ .macro EX insn, reg, src, offs
+.ex\@: \insn \reg, \src, \offs
+ _asm_extable .ex\@, .L_lbt_fault
+ .endm
+
+/*
+ * Save a thread's lbt context.
+ */
+SYM_FUNC_START(_save_lbt)
+ movscr2gr t1, $scr0 # save scr
+ stptr.d t1, a0, THREAD_SCR0
+ movscr2gr t1, $scr1
+ stptr.d t1, a0, THREAD_SCR1
+ movscr2gr t1, $scr2
+ stptr.d t1, a0, THREAD_SCR2
+ movscr2gr t1, $scr3
+ stptr.d t1, a0, THREAD_SCR3
+
+ x86mfflag t1, 0x3f # save eflags
+ stptr.d t1, a0, THREAD_EFLAGS
+ jr ra
+SYM_FUNC_END(_save_lbt)
+EXPORT_SYMBOL(_save_lbt)
+
+/*
+ * Restore a thread's lbt context.
+ */
+SYM_FUNC_START(_restore_lbt)
+ ldptr.d t1, a0, THREAD_SCR0 # restore scr
+ movgr2scr $scr0, t1
+ ldptr.d t1, a0, THREAD_SCR1
+ movgr2scr $scr1, t1
+ ldptr.d t1, a0, THREAD_SCR2
+ movgr2scr $scr2, t1
+ ldptr.d t1, a0, THREAD_SCR3
+ movgr2scr $scr3, t1
+
+ ldptr.d t1, a0, THREAD_EFLAGS # restore eflags
+ x86mtflag t1, 0x3f
+ jr ra
+SYM_FUNC_END(_restore_lbt)
+EXPORT_SYMBOL(_restore_lbt)
+
+/*
+ * Load scr/eflag with zero.
+ */
+SYM_FUNC_START(_init_lbt)
+ movgr2scr $scr0, zero
+ movgr2scr $scr1, zero
+ movgr2scr $scr2, zero
+ movgr2scr $scr3, zero
+
+ x86mtflag zero, 0x3f
+ jr ra
+SYM_FUNC_END(_init_lbt)
+
+/*
+ * a0: scr
+ * a1: eflag
+ */
+SYM_FUNC_START(_save_lbt_context)
+ movscr2gr t1, $scr0 # save scr
+ EX st.d t1, a0, (0 * SCR_REG_WIDTH)
+ movscr2gr t1, $scr1
+ EX st.d t1, a0, (1 * SCR_REG_WIDTH)
+ movscr2gr t1, $scr2
+ EX st.d t1, a0, (2 * SCR_REG_WIDTH)
+ movscr2gr t1, $scr3
+ EX st.d t1, a0, (3 * SCR_REG_WIDTH)
+
+ x86mfflag t1, 0x3f # save eflags
+ EX st.w t1, a1, 0
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_save_lbt_context)
+
+/*
+ * a0: scr
+ * a1: eflag
+ */
+SYM_FUNC_START(_restore_lbt_context)
+ EX ld.d t1, a0, (0 * SCR_REG_WIDTH) # restore scr
+ movgr2scr $scr0, t1
+ EX ld.d t1, a0, (1 * SCR_REG_WIDTH)
+ movgr2scr $scr1, t1
+ EX ld.d t1, a0, (2 * SCR_REG_WIDTH)
+ movgr2scr $scr2, t1
+ EX ld.d t1, a0, (3 * SCR_REG_WIDTH)
+ movgr2scr $scr3, t1
+
+ EX ld.w t1, a1, 0 # restore eflags
+ x86mtflag t1, 0x3f
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_restore_lbt_context)
+
+/*
+ * a0: ftop
+ */
+SYM_FUNC_START(_save_ftop_context)
+ x86mftop t1
+ st.w t1, a0, 0
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_save_ftop_context)
+
+/*
+ * a0: ftop
+ */
+SYM_FUNC_START(_restore_ftop_context)
+ ld.w t1, a0, 0
+ andi t1, t1, 0x7
+ la.pcrel a0, 1f
+ alsl.d a0, t1, a0, 3
+ jr a0
+1:
+ x86mttop 0
+ b 2f
+ x86mttop 1
+ b 2f
+ x86mttop 2
+ b 2f
+ x86mttop 3
+ b 2f
+ x86mttop 4
+ b 2f
+ x86mttop 5
+ b 2f
+ x86mttop 6
+ b 2f
+ x86mttop 7
+2:
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_restore_ftop_context)
+
+.L_lbt_fault:
+ li.w a0, -EFAULT # failure
+ jr ra
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 708665895b47..c7d33c489e04 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -67,39 +67,7 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
void __init pcpu_populate_pte(unsigned long addr)
{
- pgd_t *pgd = pgd_offset_k(addr);
- p4d_t *p4d = p4d_offset(pgd, addr);
- pud_t *pud;
- pmd_t *pmd;
-
- if (p4d_none(*p4d)) {
- pud_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- pgd_populate(&init_mm, pgd, new);
-#ifndef __PAGETABLE_PUD_FOLDED
- pud_init(new);
-#endif
- }
-
- pud = pud_offset(p4d, addr);
- if (pud_none(*pud)) {
- pmd_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- pud_populate(&init_mm, pud, new);
-#ifndef __PAGETABLE_PMD_FOLDED
- pmd_init(new);
-#endif
- }
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- pte_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd, new);
- }
+ populate_kernel_pte(addr);
}
void __init setup_per_cpu_areas(void)
@@ -470,7 +438,6 @@ void __init mem_init(void)
{
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
memblock_free_all();
- setup_zero_pages(); /* This comes from node 0 */
}
int pcibus_to_node(struct pci_bus *bus)
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index ba457e43f5be..3cb082e0c992 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -38,6 +38,7 @@
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
@@ -82,9 +83,11 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
regs->csr_euen = euen;
lose_fpu(0);
+ lose_lbt(0);
clear_thread_flag(TIF_LSX_CTX_LIVE);
clear_thread_flag(TIF_LASX_CTX_LIVE);
+ clear_thread_flag(TIF_LBT_CTX_LIVE);
clear_used_math();
regs->csr_era = pc;
regs->regs[3] = sp;
@@ -121,10 +124,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
preempt_enable();
- if (used_math())
- memcpy(dst, src, sizeof(struct task_struct));
- else
+ if (!used_math())
memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
+ else
+ memcpy(dst, src, offsetof(struct task_struct, thread.lbt.scr0));
+
+#ifdef CONFIG_CPU_HAS_LBT
+ memcpy(&dst->thread.lbt, &src->thread.lbt, sizeof(struct loongarch_lbt));
+#endif
return 0;
}
@@ -189,8 +196,10 @@ out:
ptrace_hw_copy_thread(p);
clear_tsk_thread_flag(p, TIF_USEDFPU);
clear_tsk_thread_flag(p, TIF_USEDSIMD);
+ clear_tsk_thread_flag(p, TIF_USEDLBT);
clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
+ clear_tsk_thread_flag(p, TIF_LBT_CTX_LIVE);
return 0;
}
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index f72adbf530c6..c114c5ef1332 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -38,6 +38,7 @@
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -338,6 +339,46 @@ static int simd_set(struct task_struct *target,
#endif /* CONFIG_CPU_HAS_LSX */
+#ifdef CONFIG_CPU_HAS_LBT
+static int lbt_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int r;
+
+ r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
+ r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
+ r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
+ r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
+ r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
+ r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));
+
+ return r;
+}
+
+static int lbt_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int err = 0;
+ const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
+ const int ftop_start = eflags_start + sizeof(u32);
+
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.lbt.scr0,
+ 0, 4 * sizeof(target->thread.lbt.scr0));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.lbt.eflags,
+ eflags_start, ftop_start);
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.ftop,
+ ftop_start, ftop_start + sizeof(u32));
+
+ return err;
+}
+#endif /* CONFIG_CPU_HAS_LBT */
+
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
@@ -802,6 +843,9 @@ enum loongarch_regset {
#ifdef CONFIG_CPU_HAS_LASX
REGSET_LASX,
#endif
+#ifdef CONFIG_CPU_HAS_LBT
+ REGSET_LBT,
+#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
REGSET_HW_BREAK,
REGSET_HW_WATCH,
@@ -853,6 +897,16 @@ static const struct user_regset loongarch64_regsets[] = {
.set = simd_set,
},
#endif
+#ifdef CONFIG_CPU_HAS_LBT
+ [REGSET_LBT] = {
+ .core_note_type = NT_LOONGARCH_LBT,
+ .n = 5,
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .regset_get = lbt_get,
+ .set = lbt_set,
+ },
+#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
[REGSET_HW_BREAK] = {
.core_note_type = NT_LOONGARCH_HW_BREAK,
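
The new NT_LOONGARCH_LBT regset served by lbt_get()/lbt_set() pairs with the struct user_lbt_state added to uapi/asm/ptrace.h earlier. Below is a hedged user-space sketch of reading it with PTRACE_GETREGSET; the NT_LOONGARCH_LBT constant is assumed to be available from <elf.h>, and error handling is omitted.

#include <stdio.h>
#include <elf.h>		/* NT_LOONGARCH_LBT, assumed available */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <asm/ptrace.h>		/* struct user_lbt_state */

/* Illustrative only: dump the tracee's LBT state. */
static void dump_lbt(pid_t pid)
{
	struct user_lbt_state lbt;
	struct iovec iov = { .iov_base = &lbt, .iov_len = sizeof(lbt) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_LOONGARCH_LBT, &iov) == 0)
		printf("scr0=%#llx eflags=%#x ftop=%#x\n",
		       (unsigned long long)lbt.scr[0], lbt.eflags, lbt.ftop);
}
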
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index 01f94d1e3edf..6c3eff9af9fb 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -157,12 +157,11 @@ static inline void __init update_reloc_offset(unsigned long *addr, long random_o
*new_addr = (unsigned long)reloc_offset;
}
-void * __init relocate_kernel(void)
+unsigned long __init relocate_kernel(void)
{
unsigned long kernel_length;
unsigned long random_offset = 0;
void *location_new = _text; /* Default to original kernel start */
- void *kernel_entry = start_kernel; /* Default to original kernel entry point */
char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
@@ -190,9 +189,6 @@ void * __init relocate_kernel(void)
reloc_offset += random_offset;
- /* Return the new kernel's entry point */
- kernel_entry = RELOCATED_KASLR(start_kernel);
-
/* The current thread is now within the relocated kernel */
__current_thread_info = RELOCATED_KASLR(__current_thread_info);
@@ -204,7 +200,7 @@ void * __init relocate_kernel(void)
relocate_absolute(random_offset);
- return kernel_entry;
+ return random_offset;
}
/*
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 9d830ab4e302..7783f0a3d742 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -626,4 +626,8 @@ void __init setup_arch(char **cmdline_p)
#endif
paging_init();
+
+#ifdef CONFIG_KASAN
+ kasan_init();
+#endif
}
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
index ceb899366c0a..504fdfe85203 100644
--- a/arch/loongarch/kernel/signal.c
+++ b/arch/loongarch/kernel/signal.c
@@ -32,6 +32,7 @@
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
@@ -44,6 +45,9 @@
/* Make sure we will not lose FPU ownership */
#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
+/* Make sure we will not lose LBT ownership */
+#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
+#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
/* Assembly functions to move context to/from the FPU */
extern asmlinkage int
@@ -59,6 +63,13 @@ _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+#ifdef CONFIG_CPU_HAS_LBT
+extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
+extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
+extern asmlinkage int _save_ftop_context(void __user *ftop);
+extern asmlinkage int _restore_ftop_context(void __user *ftop);
+#endif
+
struct rt_sigframe {
struct siginfo rs_info;
struct ucontext rs_uctx;
@@ -75,6 +86,7 @@ struct extctx_layout {
struct _ctx_layout fpu;
struct _ctx_layout lsx;
struct _ctx_layout lasx;
+ struct _ctx_layout lbt;
struct _ctx_layout end;
};
@@ -215,6 +227,52 @@ static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
return err;
}
+#ifdef CONFIG_CPU_HAS_LBT
+static int copy_lbt_to_sigcontext(struct lbt_context __user *ctx)
+{
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ err |= __put_user(current->thread.lbt.scr0, &regs[0]);
+ err |= __put_user(current->thread.lbt.scr1, &regs[1]);
+ err |= __put_user(current->thread.lbt.scr2, &regs[2]);
+ err |= __put_user(current->thread.lbt.scr3, &regs[3]);
+ err |= __put_user(current->thread.lbt.eflags, eflags);
+
+ return err;
+}
+
+static int copy_lbt_from_sigcontext(struct lbt_context __user *ctx)
+{
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ err |= __get_user(current->thread.lbt.scr0, &regs[0]);
+ err |= __get_user(current->thread.lbt.scr1, &regs[1]);
+ err |= __get_user(current->thread.lbt.scr2, &regs[2]);
+ err |= __get_user(current->thread.lbt.scr3, &regs[3]);
+ err |= __get_user(current->thread.lbt.eflags, eflags);
+
+ return err;
+}
+
+static int copy_ftop_to_sigcontext(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return __put_user(current->thread.fpu.ftop, ftop);
+}
+
+static int copy_ftop_from_sigcontext(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return __get_user(current->thread.fpu.ftop, ftop);
+}
+#endif
+
/*
* Wrappers for the assembly _{save,restore}_fp_context functions.
*/
@@ -272,6 +330,41 @@ static int restore_hw_lasx_context(struct lasx_context __user *ctx)
return _restore_lasx_context(regs, fcc, fcsr);
}
+/*
+ * Wrappers for the assembly _{save,restore}_lbt_context functions.
+ */
+#ifdef CONFIG_CPU_HAS_LBT
+static int save_hw_lbt_context(struct lbt_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ return _save_lbt_context(regs, eflags);
+}
+
+static int restore_hw_lbt_context(struct lbt_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ return _restore_lbt_context(regs, eflags);
+}
+
+static int save_hw_ftop_context(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return _save_ftop_context(ftop);
+}
+
+static int restore_hw_ftop_context(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return _restore_ftop_context(ftop);
+}
+#endif
+
static int fcsr_pending(unsigned int __user *fcsr)
{
int err, sig = 0;
@@ -519,6 +612,77 @@ static int protected_restore_lasx_context(struct extctx_layout *extctx)
return err ?: sig;
}
+#ifdef CONFIG_CPU_HAS_LBT
+static int protected_save_lbt_context(struct extctx_layout *extctx)
+{
+ int err = 0;
+ struct sctx_info __user *info = extctx->lbt.addr;
+ struct lbt_context __user *lbt_ctx =
+ (struct lbt_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;
+
+ while (1) {
+ lock_lbt_owner();
+ if (is_lbt_owner())
+ err |= save_hw_lbt_context(lbt_ctx);
+ else
+ err |= copy_lbt_to_sigcontext(lbt_ctx);
+ if (is_fpu_owner())
+ err |= save_hw_ftop_context(lbt_ctx);
+ else
+ err |= copy_ftop_to_sigcontext(lbt_ctx);
+ unlock_lbt_owner();
+
+ err |= __put_user(LBT_CTX_MAGIC, &info->magic);
+ err |= __put_user(extctx->lbt.size, &info->size);
+
+ if (likely(!err))
+ break;
+ /* Touch the LBT context and try again */
+ err = __put_user(0, &regs[0]) | __put_user(0, eflags);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int protected_restore_lbt_context(struct extctx_layout *extctx)
+{
+ int err = 0, tmp __maybe_unused;
+ struct sctx_info __user *info = extctx->lbt.addr;
+ struct lbt_context __user *lbt_ctx =
+ (struct lbt_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;
+
+ while (1) {
+ lock_lbt_owner();
+ if (is_lbt_owner())
+ err |= restore_hw_lbt_context(lbt_ctx);
+ else
+ err |= copy_lbt_from_sigcontext(lbt_ctx);
+ if (is_fpu_owner())
+ err |= restore_hw_ftop_context(lbt_ctx);
+ else
+ err |= copy_ftop_from_sigcontext(lbt_ctx);
+ unlock_lbt_owner();
+
+ if (likely(!err))
+ break;
+ /* Touch the LBT context and try again */
+ err = __get_user(tmp, &regs[0]) | __get_user(tmp, eflags);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+#endif
+
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
struct extctx_layout *extctx)
{
@@ -539,6 +703,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
else if (extctx->fpu.addr)
err |= protected_save_fpu_context(extctx);
+#ifdef CONFIG_CPU_HAS_LBT
+ if (extctx->lbt.addr)
+ err |= protected_save_lbt_context(extctx);
+#endif
+
/* Set the "end" magic */
info = (struct sctx_info *)extctx->end.addr;
err |= __put_user(0, &info->magic);
@@ -584,6 +753,13 @@ static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *
extctx->lasx.addr = info;
break;
+ case LBT_CTX_MAGIC:
+ if (size < (sizeof(struct sctx_info) +
+ sizeof(struct lbt_context)))
+ goto invalid;
+ extctx->lbt.addr = info;
+ break;
+
default:
goto invalid;
}
@@ -636,6 +812,11 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc
else if (extctx.fpu.addr)
err |= protected_restore_fpu_context(&extctx);
+#ifdef CONFIG_CPU_HAS_LBT
+ if (extctx.lbt.addr)
+ err |= protected_restore_lbt_context(&extctx);
+#endif
+
bad:
return err;
}
@@ -700,6 +881,13 @@ static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned lon
sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
}
+#ifdef CONFIG_CPU_HAS_LBT
+ if (cpu_has_lbt && thread_lbt_context_live()) {
+ new_sp = extframe_alloc(extctx, &extctx->lbt,
+ sizeof(struct lbt_context), LBT_CTX_ALIGN, new_sp);
+ }
+#endif
+
return new_sp;
}
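
Like the FPU/LSX/LASX records, the LBT record is self-describing: each struct sctx_info in sc_extcontext carries a magic and a total size, the payload starts right after the 16-byte header, and the chain ends at a record whose magic is zero. A hedged sketch of walking that chain from user space to locate the LBT context; the helper itself is illustrative, with types and constants taken from <asm/sigcontext.h>.

#include <stddef.h>
#include <asm/sigcontext.h>	/* struct sctx_info, struct lbt_context, LBT_CTX_MAGIC */

/* Illustrative only: locate the LBT record in sc_extcontext, if present. */
static struct lbt_context *find_lbt_context(struct sigcontext *sc)
{
	struct sctx_info *info = (struct sctx_info *)&sc->sc_extcontext;

	while (info->magic != 0) {	/* the "end" record has magic == 0 */
		if (info->magic == LBT_CTX_MAGIC)
			return (struct lbt_context *)(info + 1);
		info = (struct sctx_info *)((char *)info + info->size);
	}

	return NULL;
}
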
diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
index 2463d2fea21f..92270f14db94 100644
--- a/arch/loongarch/kernel/stacktrace.c
+++ b/arch/loongarch/kernel/stacktrace.c
@@ -18,17 +18,19 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct pt_regs dummyregs;
struct unwind_state state;
- regs = &dummyregs;
+ if (!regs) {
+ regs = &dummyregs;
- if (task == current) {
- regs->regs[3] = (unsigned long)__builtin_frame_address(0);
- regs->csr_era = (unsigned long)__builtin_return_address(0);
- } else {
- regs->regs[3] = thread_saved_fp(task);
- regs->csr_era = thread_saved_ra(task);
+ if (task == current) {
+ regs->regs[3] = (unsigned long)__builtin_frame_address(0);
+ regs->csr_era = (unsigned long)__builtin_return_address(0);
+ } else {
+ regs->regs[3] = thread_saved_fp(task);
+ regs->csr_era = thread_saved_ra(task);
+ }
+ regs->regs[1] = 0;
}
- regs->regs[1] = 0;
for (unwind_start(&state, task, regs);
!unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index 89699db45cec..65214774ef7c 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -36,7 +36,9 @@
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/inst.h>
+#include <asm/kgdb.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
@@ -702,6 +704,11 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
* pertain to them.
*/
switch (bcode) {
+ case BRK_KDB:
+ if (kgdb_breakpoint_handler(regs))
+ goto out;
+ else
+ break;
case BRK_KPROBE_BP:
if (kprobe_breakpoint_handler(regs))
goto out;
@@ -768,6 +775,9 @@ asmlinkage void noinstr do_watch(struct pt_regs *regs)
#ifndef CONFIG_HAVE_HW_BREAKPOINT
pr_warn("Hardware watch point handler not implemented!\n");
#else
+ if (kgdb_breakpoint_handler(regs))
+ goto out;
+
if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
unsigned long pc = instruction_pointer(regs);
@@ -966,13 +976,47 @@ out:
irqentry_exit(regs, state);
}
+static void init_restore_lbt(void)
+{
+ if (!thread_lbt_context_live()) {
+ /* First time LBT context user */
+ init_lbt();
+ set_thread_flag(TIF_LBT_CTX_LIVE);
+ } else {
+ if (!is_lbt_owner())
+ own_lbt_inatomic(1);
+ }
+
+ BUG_ON(!is_lbt_enabled());
+}
+
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
- local_irq_enable();
- force_sig(SIGILL);
- local_irq_disable();
+ /*
+ * BTD (Binary Translation Disable exception) can be triggered
+ * during FP save/restore if TM (Top Mode) is on, which may cause
+ * interrupts to be enabled during 'switch_to'. To avoid this
+ * situation (including the case where the user turns on TM via
+ * 'MOVGR2GCSR', which will not trigger the BTE), check PRMD first.
+ */
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_enable();
+
+ if (!cpu_has_lbt) {
+ force_sig(SIGILL);
+ goto out;
+ }
+ BUG_ON(is_lbt_enabled());
+
+ preempt_disable();
+ init_restore_lbt();
+ preempt_enable();
+
+out:
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_disable();
irqentry_exit(regs, state);
}
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
index d60d4e096cfa..a77bf160bfc4 100644
--- a/arch/loongarch/lib/Makefile
+++ b/arch/loongarch/lib/Makefile
@@ -6,4 +6,6 @@
lib-y += delay.o memset.o memcpy.o memmove.o \
clear_user.o copy_user.o csum.o dump_tlb.o unaligned.o
+obj-$(CONFIG_CPU_HAS_LSX) += xor_simd.o xor_simd_glue.o
+
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S
index 0790eadce166..be741544e62b 100644
--- a/arch/loongarch/lib/clear_user.S
+++ b/arch/loongarch/lib/clear_user.S
@@ -11,19 +11,6 @@
#include <asm/cpu.h>
#include <asm/regdef.h>
-.irp to, 0, 1, 2, 3, 4, 5, 6, 7
-.L_fixup_handle_\to\():
- sub.d a0, a2, a0
- addi.d a0, a0, (\to) * (-8)
- jr ra
-.endr
-
-.irp to, 0, 2, 4
-.L_fixup_handle_s\to\():
- addi.d a0, a1, -\to
- jr ra
-.endr
-
SYM_FUNC_START(__clear_user)
/*
* Some CPUs support hardware unaligned access
@@ -51,7 +38,7 @@ SYM_FUNC_START(__clear_user_generic)
2: move a0, a1
jr ra
- _asm_extable 1b, .L_fixup_handle_s0
+ _asm_extable 1b, 2b
SYM_FUNC_END(__clear_user_generic)
/*
@@ -173,33 +160,47 @@ SYM_FUNC_START(__clear_user_fast)
jr ra
/* fixup and ex_table */
- _asm_extable 0b, .L_fixup_handle_0
- _asm_extable 1b, .L_fixup_handle_0
- _asm_extable 2b, .L_fixup_handle_1
- _asm_extable 3b, .L_fixup_handle_2
- _asm_extable 4b, .L_fixup_handle_3
- _asm_extable 5b, .L_fixup_handle_4
- _asm_extable 6b, .L_fixup_handle_5
- _asm_extable 7b, .L_fixup_handle_6
- _asm_extable 8b, .L_fixup_handle_7
- _asm_extable 9b, .L_fixup_handle_0
- _asm_extable 10b, .L_fixup_handle_1
- _asm_extable 11b, .L_fixup_handle_2
- _asm_extable 12b, .L_fixup_handle_3
- _asm_extable 13b, .L_fixup_handle_0
- _asm_extable 14b, .L_fixup_handle_1
- _asm_extable 15b, .L_fixup_handle_0
- _asm_extable 16b, .L_fixup_handle_0
- _asm_extable 17b, .L_fixup_handle_s0
- _asm_extable 18b, .L_fixup_handle_s0
- _asm_extable 19b, .L_fixup_handle_s0
- _asm_extable 20b, .L_fixup_handle_s2
- _asm_extable 21b, .L_fixup_handle_s0
- _asm_extable 22b, .L_fixup_handle_s0
- _asm_extable 23b, .L_fixup_handle_s4
- _asm_extable 24b, .L_fixup_handle_s0
- _asm_extable 25b, .L_fixup_handle_s4
- _asm_extable 26b, .L_fixup_handle_s0
- _asm_extable 27b, .L_fixup_handle_s4
- _asm_extable 28b, .L_fixup_handle_s0
+.Llarge_fixup:
+ sub.d a1, a2, a0
+
+.Lsmall_fixup:
+29: st.b zero, a0, 0
+ addi.d a0, a0, 1
+ addi.d a1, a1, -1
+ bgt a1, zero, 29b
+
+.Lexit:
+ move a0, a1
+ jr ra
+
+ _asm_extable 0b, .Lsmall_fixup
+ _asm_extable 1b, .Llarge_fixup
+ _asm_extable 2b, .Llarge_fixup
+ _asm_extable 3b, .Llarge_fixup
+ _asm_extable 4b, .Llarge_fixup
+ _asm_extable 5b, .Llarge_fixup
+ _asm_extable 6b, .Llarge_fixup
+ _asm_extable 7b, .Llarge_fixup
+ _asm_extable 8b, .Llarge_fixup
+ _asm_extable 9b, .Llarge_fixup
+ _asm_extable 10b, .Llarge_fixup
+ _asm_extable 11b, .Llarge_fixup
+ _asm_extable 12b, .Llarge_fixup
+ _asm_extable 13b, .Llarge_fixup
+ _asm_extable 14b, .Llarge_fixup
+ _asm_extable 15b, .Llarge_fixup
+ _asm_extable 16b, .Llarge_fixup
+ _asm_extable 17b, .Lexit
+ _asm_extable 18b, .Lsmall_fixup
+ _asm_extable 19b, .Lsmall_fixup
+ _asm_extable 20b, .Lsmall_fixup
+ _asm_extable 21b, .Lsmall_fixup
+ _asm_extable 22b, .Lsmall_fixup
+ _asm_extable 23b, .Lsmall_fixup
+ _asm_extable 24b, .Lsmall_fixup
+ _asm_extable 25b, .Lsmall_fixup
+ _asm_extable 26b, .Lsmall_fixup
+ _asm_extable 27b, .Lsmall_fixup
+ _asm_extable 28b, .Lsmall_fixup
+ _asm_extable 29b, .Lexit
SYM_FUNC_END(__clear_user_fast)
diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S
index bfe3d2793d00..feec3d362803 100644
--- a/arch/loongarch/lib/copy_user.S
+++ b/arch/loongarch/lib/copy_user.S
@@ -11,19 +11,6 @@
#include <asm/cpu.h>
#include <asm/regdef.h>
-.irp to, 0, 1, 2, 3, 4, 5, 6, 7
-.L_fixup_handle_\to\():
- sub.d a0, a2, a0
- addi.d a0, a0, (\to) * (-8)
- jr ra
-.endr
-
-.irp to, 0, 2, 4
-.L_fixup_handle_s\to\():
- addi.d a0, a2, -\to
- jr ra
-.endr
-
SYM_FUNC_START(__copy_user)
/*
* Some CPUs support hardware unaligned access
@@ -54,8 +41,8 @@ SYM_FUNC_START(__copy_user_generic)
3: move a0, a2
jr ra
- _asm_extable 1b, .L_fixup_handle_s0
- _asm_extable 2b, .L_fixup_handle_s0
+ _asm_extable 1b, 3b
+ _asm_extable 2b, 3b
SYM_FUNC_END(__copy_user_generic)
/*
@@ -69,10 +56,10 @@ SYM_FUNC_START(__copy_user_fast)
sltui t0, a2, 9
bnez t0, .Lsmall
- add.d a3, a1, a2
- add.d a2, a0, a2
0: ld.d t0, a1, 0
1: st.d t0, a0, 0
+ add.d a3, a1, a2
+ add.d a2, a0, a2
/* align up destination address */
andi t1, a0, 7
@@ -94,7 +81,6 @@ SYM_FUNC_START(__copy_user_fast)
7: ld.d t5, a1, 40
8: ld.d t6, a1, 48
9: ld.d t7, a1, 56
- addi.d a1, a1, 64
10: st.d t0, a0, 0
11: st.d t1, a0, 8
12: st.d t2, a0, 16
@@ -103,6 +89,7 @@ SYM_FUNC_START(__copy_user_fast)
15: st.d t5, a0, 40
16: st.d t6, a0, 48
17: st.d t7, a0, 56
+ addi.d a1, a1, 64
addi.d a0, a0, 64
bltu a1, a4, .Lloop64
@@ -114,11 +101,11 @@ SYM_FUNC_START(__copy_user_fast)
19: ld.d t1, a1, 8
20: ld.d t2, a1, 16
21: ld.d t3, a1, 24
- addi.d a1, a1, 32
22: st.d t0, a0, 0
23: st.d t1, a0, 8
24: st.d t2, a0, 16
25: st.d t3, a0, 24
+ addi.d a1, a1, 32
addi.d a0, a0, 32
.Llt32:
@@ -126,9 +113,9 @@ SYM_FUNC_START(__copy_user_fast)
bgeu a1, a4, .Llt16
26: ld.d t0, a1, 0
27: ld.d t1, a1, 8
- addi.d a1, a1, 16
28: st.d t0, a0, 0
29: st.d t1, a0, 8
+ addi.d a1, a1, 16
addi.d a0, a0, 16
.Llt16:
@@ -136,6 +123,7 @@ SYM_FUNC_START(__copy_user_fast)
bgeu a1, a4, .Llt8
30: ld.d t0, a1, 0
31: st.d t0, a0, 0
+ addi.d a1, a1, 8
addi.d a0, a0, 8
.Llt8:
@@ -214,62 +202,79 @@ SYM_FUNC_START(__copy_user_fast)
jr ra
/* fixup and ex_table */
- _asm_extable 0b, .L_fixup_handle_0
- _asm_extable 1b, .L_fixup_handle_0
- _asm_extable 2b, .L_fixup_handle_0
- _asm_extable 3b, .L_fixup_handle_0
- _asm_extable 4b, .L_fixup_handle_0
- _asm_extable 5b, .L_fixup_handle_0
- _asm_extable 6b, .L_fixup_handle_0
- _asm_extable 7b, .L_fixup_handle_0
- _asm_extable 8b, .L_fixup_handle_0
- _asm_extable 9b, .L_fixup_handle_0
- _asm_extable 10b, .L_fixup_handle_0
- _asm_extable 11b, .L_fixup_handle_1
- _asm_extable 12b, .L_fixup_handle_2
- _asm_extable 13b, .L_fixup_handle_3
- _asm_extable 14b, .L_fixup_handle_4
- _asm_extable 15b, .L_fixup_handle_5
- _asm_extable 16b, .L_fixup_handle_6
- _asm_extable 17b, .L_fixup_handle_7
- _asm_extable 18b, .L_fixup_handle_0
- _asm_extable 19b, .L_fixup_handle_0
- _asm_extable 20b, .L_fixup_handle_0
- _asm_extable 21b, .L_fixup_handle_0
- _asm_extable 22b, .L_fixup_handle_0
- _asm_extable 23b, .L_fixup_handle_1
- _asm_extable 24b, .L_fixup_handle_2
- _asm_extable 25b, .L_fixup_handle_3
- _asm_extable 26b, .L_fixup_handle_0
- _asm_extable 27b, .L_fixup_handle_0
- _asm_extable 28b, .L_fixup_handle_0
- _asm_extable 29b, .L_fixup_handle_1
- _asm_extable 30b, .L_fixup_handle_0
- _asm_extable 31b, .L_fixup_handle_0
- _asm_extable 32b, .L_fixup_handle_0
- _asm_extable 33b, .L_fixup_handle_0
- _asm_extable 34b, .L_fixup_handle_s0
- _asm_extable 35b, .L_fixup_handle_s0
- _asm_extable 36b, .L_fixup_handle_s0
- _asm_extable 37b, .L_fixup_handle_s0
- _asm_extable 38b, .L_fixup_handle_s0
- _asm_extable 39b, .L_fixup_handle_s0
- _asm_extable 40b, .L_fixup_handle_s0
- _asm_extable 41b, .L_fixup_handle_s2
- _asm_extable 42b, .L_fixup_handle_s0
- _asm_extable 43b, .L_fixup_handle_s0
- _asm_extable 44b, .L_fixup_handle_s0
- _asm_extable 45b, .L_fixup_handle_s0
- _asm_extable 46b, .L_fixup_handle_s0
- _asm_extable 47b, .L_fixup_handle_s4
- _asm_extable 48b, .L_fixup_handle_s0
- _asm_extable 49b, .L_fixup_handle_s0
- _asm_extable 50b, .L_fixup_handle_s0
- _asm_extable 51b, .L_fixup_handle_s4
- _asm_extable 52b, .L_fixup_handle_s0
- _asm_extable 53b, .L_fixup_handle_s0
- _asm_extable 54b, .L_fixup_handle_s0
- _asm_extable 55b, .L_fixup_handle_s4
- _asm_extable 56b, .L_fixup_handle_s0
- _asm_extable 57b, .L_fixup_handle_s0
+.Llarge_fixup:
+ sub.d a2, a2, a0
+
+.Lsmall_fixup:
+58: ld.b t0, a1, 0
+59: st.b t0, a0, 0
+ addi.d a0, a0, 1
+ addi.d a1, a1, 1
+ addi.d a2, a2, -1
+ bgt a2, zero, 58b
+
+.Lexit:
+ move a0, a2
+ jr ra
+
+ _asm_extable 0b, .Lsmall_fixup
+ _asm_extable 1b, .Lsmall_fixup
+ _asm_extable 2b, .Llarge_fixup
+ _asm_extable 3b, .Llarge_fixup
+ _asm_extable 4b, .Llarge_fixup
+ _asm_extable 5b, .Llarge_fixup
+ _asm_extable 6b, .Llarge_fixup
+ _asm_extable 7b, .Llarge_fixup
+ _asm_extable 8b, .Llarge_fixup
+ _asm_extable 9b, .Llarge_fixup
+ _asm_extable 10b, .Llarge_fixup
+ _asm_extable 11b, .Llarge_fixup
+ _asm_extable 12b, .Llarge_fixup
+ _asm_extable 13b, .Llarge_fixup
+ _asm_extable 14b, .Llarge_fixup
+ _asm_extable 15b, .Llarge_fixup
+ _asm_extable 16b, .Llarge_fixup
+ _asm_extable 17b, .Llarge_fixup
+ _asm_extable 18b, .Llarge_fixup
+ _asm_extable 19b, .Llarge_fixup
+ _asm_extable 20b, .Llarge_fixup
+ _asm_extable 21b, .Llarge_fixup
+ _asm_extable 22b, .Llarge_fixup
+ _asm_extable 23b, .Llarge_fixup
+ _asm_extable 24b, .Llarge_fixup
+ _asm_extable 25b, .Llarge_fixup
+ _asm_extable 26b, .Llarge_fixup
+ _asm_extable 27b, .Llarge_fixup
+ _asm_extable 28b, .Llarge_fixup
+ _asm_extable 29b, .Llarge_fixup
+ _asm_extable 30b, .Llarge_fixup
+ _asm_extable 31b, .Llarge_fixup
+ _asm_extable 32b, .Llarge_fixup
+ _asm_extable 33b, .Llarge_fixup
+ _asm_extable 34b, .Lexit
+ _asm_extable 35b, .Lexit
+ _asm_extable 36b, .Lsmall_fixup
+ _asm_extable 37b, .Lsmall_fixup
+ _asm_extable 38b, .Lsmall_fixup
+ _asm_extable 39b, .Lsmall_fixup
+ _asm_extable 40b, .Lsmall_fixup
+ _asm_extable 41b, .Lsmall_fixup
+ _asm_extable 42b, .Lsmall_fixup
+ _asm_extable 43b, .Lsmall_fixup
+ _asm_extable 44b, .Lsmall_fixup
+ _asm_extable 45b, .Lsmall_fixup
+ _asm_extable 46b, .Lsmall_fixup
+ _asm_extable 47b, .Lsmall_fixup
+ _asm_extable 48b, .Lsmall_fixup
+ _asm_extable 49b, .Lsmall_fixup
+ _asm_extable 50b, .Lsmall_fixup
+ _asm_extable 51b, .Lsmall_fixup
+ _asm_extable 52b, .Lsmall_fixup
+ _asm_extable 53b, .Lsmall_fixup
+ _asm_extable 54b, .Lsmall_fixup
+ _asm_extable 55b, .Lsmall_fixup
+ _asm_extable 56b, .Lsmall_fixup
+ _asm_extable 57b, .Lsmall_fixup
+ _asm_extable 58b, .Lexit
+ _asm_extable 59b, .Lexit
SYM_FUNC_END(__copy_user_fast)
diff --git a/arch/loongarch/lib/memcpy.S b/arch/loongarch/lib/memcpy.S
index cc30b3b6252f..fa1148878d2b 100644
--- a/arch/loongarch/lib/memcpy.S
+++ b/arch/loongarch/lib/memcpy.S
@@ -10,6 +10,8 @@
#include <asm/cpu.h>
#include <asm/regdef.h>
+.section .noinstr.text, "ax"
+
SYM_FUNC_START(memcpy)
/*
* Some CPUs support hardware unaligned access
@@ -17,9 +19,13 @@ SYM_FUNC_START(memcpy)
ALTERNATIVE "b __memcpy_generic", \
"b __memcpy_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memcpy)
-_ASM_NOKPROBE(memcpy)
+SYM_FUNC_ALIAS(__memcpy, memcpy)
EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(__memcpy)
+
+_ASM_NOKPROBE(memcpy)
+_ASM_NOKPROBE(__memcpy)
/*
* void *__memcpy_generic(void *dst, const void *src, size_t n)
diff --git a/arch/loongarch/lib/memmove.S b/arch/loongarch/lib/memmove.S
index 7dc76d1484b6..82dae062fec8 100644
--- a/arch/loongarch/lib/memmove.S
+++ b/arch/loongarch/lib/memmove.S
@@ -10,23 +10,29 @@
#include <asm/cpu.h>
#include <asm/regdef.h>
+.section .noinstr.text, "ax"
+
SYM_FUNC_START(memmove)
- blt a0, a1, memcpy /* dst < src, memcpy */
- blt a1, a0, rmemcpy /* src < dst, rmemcpy */
- jr ra /* dst == src, return */
+ blt a0, a1, __memcpy /* dst < src, memcpy */
+ blt a1, a0, __rmemcpy /* src < dst, rmemcpy */
+ jr ra /* dst == src, return */
SYM_FUNC_END(memmove)
-_ASM_NOKPROBE(memmove)
+SYM_FUNC_ALIAS(__memmove, memmove)
EXPORT_SYMBOL(memmove)
+EXPORT_SYMBOL(__memmove)
+
+_ASM_NOKPROBE(memmove)
+_ASM_NOKPROBE(__memmove)
-SYM_FUNC_START(rmemcpy)
+SYM_FUNC_START(__rmemcpy)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __rmemcpy_generic", \
"b __rmemcpy_fast", CPU_FEATURE_UAL
-SYM_FUNC_END(rmemcpy)
-_ASM_NOKPROBE(rmemcpy)
+SYM_FUNC_END(__rmemcpy)
+_ASM_NOKPROBE(__rmemcpy)
/*
* void *__rmemcpy_generic(void *dst, const void *src, size_t n)
diff --git a/arch/loongarch/lib/memset.S b/arch/loongarch/lib/memset.S
index 3f20f7996e8e..06d3ca54cbfe 100644
--- a/arch/loongarch/lib/memset.S
+++ b/arch/loongarch/lib/memset.S
@@ -16,6 +16,8 @@
bstrins.d \r0, \r0, 63, 32
.endm
+.section .noinstr.text, "ax"
+
SYM_FUNC_START(memset)
/*
* Some CPUs support hardware unaligned access
@@ -23,9 +25,13 @@ SYM_FUNC_START(memset)
ALTERNATIVE "b __memset_generic", \
"b __memset_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memset)
-_ASM_NOKPROBE(memset)
+SYM_FUNC_ALIAS(__memset, memset)
EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(__memset)
+
+_ASM_NOKPROBE(memset)
+_ASM_NOKPROBE(__memset)
/*
* void *__memset_generic(void *s, int c, size_t n)
diff --git a/arch/loongarch/lib/xor_simd.c b/arch/loongarch/lib/xor_simd.c
new file mode 100644
index 000000000000..84cd24b728c4
--- /dev/null
+++ b/arch/loongarch/lib/xor_simd.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LoongArch SIMD XOR operations
+ *
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ */
+
+#include "xor_simd.h"
+
+/*
+ * Process one cache line (64 bytes) per loop iteration. This assumes that
+ * future mainstream LoongArch cores have performance characteristics similar
+ * to the current models.
+ */
+#define LINE_WIDTH 64
+
+#ifdef CONFIG_CPU_HAS_LSX
+
+#define LD(reg, base, offset) \
+ "vld $vr" #reg ", %[" #base "], " #offset "\n\t"
+#define ST(reg, base, offset) \
+ "vst $vr" #reg ", %[" #base "], " #offset "\n\t"
+#define XOR(dj, k) "vxor.v $vr" #dj ", $vr" #dj ", $vr" #k "\n\t"
+
+#define LD_INOUT_LINE(base) \
+ LD(0, base, 0) \
+ LD(1, base, 16) \
+ LD(2, base, 32) \
+ LD(3, base, 48)
+
+#define LD_AND_XOR_LINE(base) \
+ LD(4, base, 0) \
+ LD(5, base, 16) \
+ LD(6, base, 32) \
+ LD(7, base, 48) \
+ XOR(0, 4) \
+ XOR(1, 5) \
+ XOR(2, 6) \
+ XOR(3, 7)
+
+#define ST_LINE(base) \
+ ST(0, base, 0) \
+ ST(1, base, 16) \
+ ST(2, base, 32) \
+ ST(3, base, 48)
+
+#define XOR_FUNC_NAME(nr) __xor_lsx_##nr
+#include "xor_template.c"
+
+#undef LD
+#undef ST
+#undef XOR
+#undef LD_INOUT_LINE
+#undef LD_AND_XOR_LINE
+#undef ST_LINE
+#undef XOR_FUNC_NAME
+
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+
+#define LD(reg, base, offset) \
+ "xvld $xr" #reg ", %[" #base "], " #offset "\n\t"
+#define ST(reg, base, offset) \
+ "xvst $xr" #reg ", %[" #base "], " #offset "\n\t"
+#define XOR(dj, k) "xvxor.v $xr" #dj ", $xr" #dj ", $xr" #k "\n\t"
+
+#define LD_INOUT_LINE(base) \
+ LD(0, base, 0) \
+ LD(1, base, 32)
+
+#define LD_AND_XOR_LINE(base) \
+ LD(2, base, 0) \
+ LD(3, base, 32) \
+ XOR(0, 2) \
+ XOR(1, 3)
+
+#define ST_LINE(base) \
+ ST(0, base, 0) \
+ ST(1, base, 32)
+
+#define XOR_FUNC_NAME(nr) __xor_lasx_##nr
+#include "xor_template.c"
+
+#undef LD
+#undef ST
+#undef XOR
+#undef LD_INOUT_LINE
+#undef LD_AND_XOR_LINE
+#undef ST_LINE
+#undef XOR_FUNC_NAME
+
+#endif /* CONFIG_CPU_HAS_LASX */
diff --git a/arch/loongarch/lib/xor_simd.h b/arch/loongarch/lib/xor_simd.h
new file mode 100644
index 000000000000..f50f32514d80
--- /dev/null
+++ b/arch/loongarch/lib/xor_simd.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Simple interface to link xor_simd.c and xor_simd_glue.c
+ *
+ * Separating these files ensures that no SIMD instructions are run outside of
+ * the kfpu critical section.
+ */
+
+#ifndef __LOONGARCH_LIB_XOR_SIMD_H
+#define __LOONGARCH_LIB_XOR_SIMD_H
+
+#ifdef CONFIG_CPU_HAS_LSX
+void __xor_lsx_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void __xor_lsx_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3);
+void __xor_lsx_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void __xor_lsx_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4, const unsigned long * __restrict p5);
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+void __xor_lasx_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void __xor_lasx_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3);
+void __xor_lasx_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void __xor_lasx_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4, const unsigned long * __restrict p5);
+#endif /* CONFIG_CPU_HAS_LASX */
+
+#endif /* __LOONGARCH_LIB_XOR_SIMD_H */
diff --git a/arch/loongarch/lib/xor_simd_glue.c b/arch/loongarch/lib/xor_simd_glue.c
new file mode 100644
index 000000000000..393f689dbcf6
--- /dev/null
+++ b/arch/loongarch/lib/xor_simd_glue.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LoongArch SIMD XOR operations
+ *
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <asm/fpu.h>
+#include <asm/xor_simd.h>
+#include "xor_simd.h"
+
+#define MAKE_XOR_GLUE_2(flavor) \
+void xor_##flavor##_2(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2) \
+{ \
+ kernel_fpu_begin(); \
+ __xor_##flavor##_2(bytes, p1, p2); \
+ kernel_fpu_end(); \
+} \
+EXPORT_SYMBOL_GPL(xor_##flavor##_2)
+
+#define MAKE_XOR_GLUE_3(flavor) \
+void xor_##flavor##_3(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2, \
+ const unsigned long * __restrict p3) \
+{ \
+ kernel_fpu_begin(); \
+ __xor_##flavor##_3(bytes, p1, p2, p3); \
+ kernel_fpu_end(); \
+} \
+EXPORT_SYMBOL_GPL(xor_##flavor##_3)
+
+#define MAKE_XOR_GLUE_4(flavor) \
+void xor_##flavor##_4(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2, \
+ const unsigned long * __restrict p3, \
+ const unsigned long * __restrict p4) \
+{ \
+ kernel_fpu_begin(); \
+ __xor_##flavor##_4(bytes, p1, p2, p3, p4); \
+ kernel_fpu_end(); \
+} \
+EXPORT_SYMBOL_GPL(xor_##flavor##_4)
+
+#define MAKE_XOR_GLUE_5(flavor) \
+void xor_##flavor##_5(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2, \
+ const unsigned long * __restrict p3, \
+ const unsigned long * __restrict p4, \
+ const unsigned long * __restrict p5) \
+{ \
+ kernel_fpu_begin(); \
+ __xor_##flavor##_5(bytes, p1, p2, p3, p4, p5); \
+ kernel_fpu_end(); \
+} \
+EXPORT_SYMBOL_GPL(xor_##flavor##_5)
+
+#define MAKE_XOR_GLUES(flavor) \
+ MAKE_XOR_GLUE_2(flavor); \
+ MAKE_XOR_GLUE_3(flavor); \
+ MAKE_XOR_GLUE_4(flavor); \
+ MAKE_XOR_GLUE_5(flavor)
+
+#ifdef CONFIG_CPU_HAS_LSX
+MAKE_XOR_GLUES(lsx);
+#endif
+
+#ifdef CONFIG_CPU_HAS_LASX
+MAKE_XOR_GLUES(lasx);
+#endif
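
For reference, MAKE_XOR_GLUE_2(lsx); above expands to roughly the following once the preprocessor has run (a sketch of the expansion, with the line continuations removed):

void xor_lsx_2(unsigned long bytes, unsigned long * __restrict p1,
	       const unsigned long * __restrict p2)
{
	kernel_fpu_begin();		/* enter kernel FPU/SIMD context, saving user state */
	__xor_lsx_2(bytes, p1, p2);	/* SIMD body generated in xor_simd.c */
	kernel_fpu_end();		/* leave kernel FPU/SIMD context, restoring user state */
}
EXPORT_SYMBOL_GPL(xor_lsx_2);
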
diff --git a/arch/loongarch/lib/xor_template.c b/arch/loongarch/lib/xor_template.c
new file mode 100644
index 000000000000..0358ced7fe33
--- /dev/null
+++ b/arch/loongarch/lib/xor_template.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * Template for XOR operations, instantiated in xor_simd.c.
+ *
+ * Expected preprocessor definitions:
+ *
+ * - LINE_WIDTH
+ * - XOR_FUNC_NAME(nr)
+ * - LD_INOUT_LINE(buf)
+ * - LD_AND_XOR_LINE(buf)
+ * - ST_LINE(buf)
+ */
+
+void XOR_FUNC_NAME(2)(unsigned long bytes,
+ unsigned long * __restrict v1,
+ const unsigned long * __restrict v2)
+{
+ unsigned long lines = bytes / LINE_WIDTH;
+
+ do {
+ __asm__ __volatile__ (
+ LD_INOUT_LINE(v1)
+ LD_AND_XOR_LINE(v2)
+ ST_LINE(v1)
+ : : [v1] "r"(v1), [v2] "r"(v2) : "memory"
+ );
+
+ v1 += LINE_WIDTH / sizeof(unsigned long);
+ v2 += LINE_WIDTH / sizeof(unsigned long);
+ } while (--lines > 0);
+}
+
+void XOR_FUNC_NAME(3)(unsigned long bytes,
+ unsigned long * __restrict v1,
+ const unsigned long * __restrict v2,
+ const unsigned long * __restrict v3)
+{
+ unsigned long lines = bytes / LINE_WIDTH;
+
+ do {
+ __asm__ __volatile__ (
+ LD_INOUT_LINE(v1)
+ LD_AND_XOR_LINE(v2)
+ LD_AND_XOR_LINE(v3)
+ ST_LINE(v1)
+ : : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3) : "memory"
+ );
+
+ v1 += LINE_WIDTH / sizeof(unsigned long);
+ v2 += LINE_WIDTH / sizeof(unsigned long);
+ v3 += LINE_WIDTH / sizeof(unsigned long);
+ } while (--lines > 0);
+}
+
+void XOR_FUNC_NAME(4)(unsigned long bytes,
+ unsigned long * __restrict v1,
+ const unsigned long * __restrict v2,
+ const unsigned long * __restrict v3,
+ const unsigned long * __restrict v4)
+{
+ unsigned long lines = bytes / LINE_WIDTH;
+
+ do {
+ __asm__ __volatile__ (
+ LD_INOUT_LINE(v1)
+ LD_AND_XOR_LINE(v2)
+ LD_AND_XOR_LINE(v3)
+ LD_AND_XOR_LINE(v4)
+ ST_LINE(v1)
+ : : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3), [v4] "r"(v4)
+ : "memory"
+ );
+
+ v1 += LINE_WIDTH / sizeof(unsigned long);
+ v2 += LINE_WIDTH / sizeof(unsigned long);
+ v3 += LINE_WIDTH / sizeof(unsigned long);
+ v4 += LINE_WIDTH / sizeof(unsigned long);
+ } while (--lines > 0);
+}
+
+void XOR_FUNC_NAME(5)(unsigned long bytes,
+ unsigned long * __restrict v1,
+ const unsigned long * __restrict v2,
+ const unsigned long * __restrict v3,
+ const unsigned long * __restrict v4,
+ const unsigned long * __restrict v5)
+{
+ unsigned long lines = bytes / LINE_WIDTH;
+
+ do {
+ __asm__ __volatile__ (
+ LD_INOUT_LINE(v1)
+ LD_AND_XOR_LINE(v2)
+ LD_AND_XOR_LINE(v3)
+ LD_AND_XOR_LINE(v4)
+ LD_AND_XOR_LINE(v5)
+ ST_LINE(v1)
+ : : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3), [v4] "r"(v4),
+ [v5] "r"(v5) : "memory"
+ );
+
+ v1 += LINE_WIDTH / sizeof(unsigned long);
+ v2 += LINE_WIDTH / sizeof(unsigned long);
+ v3 += LINE_WIDTH / sizeof(unsigned long);
+ v4 += LINE_WIDTH / sizeof(unsigned long);
+ v5 += LINE_WIDTH / sizeof(unsigned long);
+ } while (--lines > 0);
+}
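
These template instances are consumed through the kernel's generic XOR framework rather than called directly. A plausible hookup, sketched here only for orientation (the actual wiring lives in arch/loongarch/include/asm/xor.h, which is not part of this section), registers them in a struct xor_block_template so xor_blocks() can benchmark and select them:

#include <linux/raid/xor.h>
#include <asm/xor_simd.h>

/* Sketch only: field names follow include/linux/raid/xor.h. */
static struct xor_block_template xor_block_lsx = {
	.name = "lsx",
	.do_2 = xor_lsx_2,
	.do_3 = xor_lsx_3,
	.do_4 = xor_lsx_4,
	.do_5 = xor_lsx_5,
};
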
diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile
index 8ffc6383f836..e4d1e581dbae 100644
--- a/arch/loongarch/mm/Makefile
+++ b/arch/loongarch/mm/Makefile
@@ -7,3 +7,6 @@ obj-y += init.o cache.o tlb.o tlbex.o extable.o \
fault.o ioremap.o maccess.o mmap.o pgtable.o page.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+KASAN_SANITIZE_kasan_init.o := n
diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c
index 72685a48eaf0..6be04d36ca07 100644
--- a/arch/loongarch/mm/cache.c
+++ b/arch/loongarch/mm/cache.c
@@ -156,7 +156,6 @@ void cpu_cache_init(void)
current_cpu_data.cache_leaves_present = leaf;
current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;
- shm_align_mask = PAGE_SIZE - 1;
}
static const pgprot_t protection_map[16] = {
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index da5b6d518cdb..e6376e3dce86 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -23,6 +23,7 @@
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
+#include <linux/kfence.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
@@ -30,7 +31,8 @@
int show_unhandled_signals = 1;
-static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
+static void __kprobes no_context(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
{
const int field = sizeof(unsigned long) * 2;
@@ -38,6 +40,9 @@ static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
if (fixup_exception(regs))
return;
+ if (kfence_handle_page_fault(address, write, regs))
+ return;
+
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
@@ -51,14 +56,15 @@ static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
die("Oops", regs);
}
-static void __kprobes do_out_of_memory(struct pt_regs *regs, unsigned long address)
+static void __kprobes do_out_of_memory(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
{
/*
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
if (!user_mode(regs)) {
- no_context(regs, address);
+ no_context(regs, write, address);
return;
}
pagefault_out_of_memory();
@@ -69,7 +75,7 @@ static void __kprobes do_sigbus(struct pt_regs *regs,
{
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
- no_context(regs, address);
+ no_context(regs, write, address);
return;
}
@@ -90,7 +96,7 @@ static void __kprobes do_sigsegv(struct pt_regs *regs,
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
- no_context(regs, address);
+ no_context(regs, write, address);
return;
}
@@ -149,7 +155,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
*/
if (address & __UA_LIMIT) {
if (!user_mode(regs))
- no_context(regs, address);
+ no_context(regs, write, address);
else
do_sigsegv(regs, write, address, si_code);
return;
@@ -211,7 +217,7 @@ good_area:
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
- no_context(regs, address);
+ no_context(regs, write, address);
return;
}
@@ -232,7 +238,7 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
mmap_read_unlock(mm);
if (fault & VM_FAULT_OOM) {
- do_out_of_memory(regs, address);
+ do_out_of_memory(regs, write, address);
return;
} else if (fault & VM_FAULT_SIGSEGV) {
do_sigsegv(regs, write, address, si_code);
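
Threading the write argument through no_context() is what allows the new KFENCE hook to report the access direction. For reference, the hook used above is declared by the generic KFENCE API along these lines (shown only to explain why write now has to be plumbed through; not part of this patch):

/* Returns true if the fault hit a KFENCE object and was handled there. */
bool kfence_handle_page_fault(unsigned long addr, bool is_write,
			      struct pt_regs *regs);
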
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 3b7d8129570b..f3fe8c06ba4d 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -35,33 +35,8 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>
-/*
- * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. Since page is never written to after the initialization we
- * don't have to care about aliases on other CPUs.
- */
-unsigned long empty_zero_page, zero_page_mask;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(zero_page_mask);
-
-void setup_zero_pages(void)
-{
- unsigned int order, i;
- struct page *page;
-
- order = 0;
-
- empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!empty_zero_page)
- panic("Oh boy, that early out of memory?");
-
- page = virt_to_page((void *)empty_zero_page);
- split_page(page, order);
- for (i = 0; i < (1 << order); i++, page++)
- mark_page_reserved(page);
-
- zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
-}
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
@@ -106,7 +81,6 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
- setup_zero_pages(); /* Setup zeroed pages. */
}
#endif /* !CONFIG_NUMA */
@@ -191,43 +165,42 @@ void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *al
#endif
#endif
-static pte_t *fixmap_pte(unsigned long addr)
+pte_t * __init populate_kernel_pte(unsigned long addr)
{
- pgd_t *pgd;
- p4d_t *p4d;
+ pgd_t *pgd = pgd_offset_k(addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud;
pmd_t *pmd;
- pgd = pgd_offset_k(addr);
- p4d = p4d_offset(pgd, addr);
-
- if (pgd_none(*pgd)) {
- pud_t *new __maybe_unused;
-
- new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
- pgd_populate(&init_mm, pgd, new);
+ if (p4d_none(*p4d)) {
+ pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pud)
+ panic("%s: Failed to allocate memory\n", __func__);
+ p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
- pud_init(new);
+ pud_init(pud);
#endif
}
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
- pmd_t *new __maybe_unused;
-
- new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
- pud_populate(&init_mm, pud, new);
+ pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pmd)
+ panic("%s: Failed to allocate memory\n", __func__);
+ pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
- pmd_init(new);
+ pmd_init(pmd);
#endif
}
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd)) {
- pte_t *new __maybe_unused;
+ if (!pmd_present(*pmd)) {
+ pte_t *pte;
- new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd, new);
+ pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate memory\n", __func__);
+ pmd_populate_kernel(&init_mm, pmd, pte);
}
return pte_offset_kernel(pmd, addr);
@@ -241,7 +214,7 @@ void __init __set_fixmap(enum fixed_addresses idx,
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
- ptep = fixmap_pte(addr);
+ ptep = populate_kernel_pte(addr);
if (!pte_none(*ptep)) {
pte_ERROR(*ptep);
return;
diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
new file mode 100644
index 000000000000..da68bc1a4643
--- /dev/null
+++ b/arch/loongarch/mm/kasan_init.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+#define pr_fmt(fmt) "kasan: " fmt
+#include <linux/kasan.h>
+#include <linux/memblock.h>
+#include <linux/sched/task.h>
+
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+#include <asm-generic/sections.h>
+
+static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+
+#ifdef __PAGETABLE_PUD_FOLDED
+#define __p4d_none(early, p4d) (0)
+#else
+#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
+(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
+#endif
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define __pud_none(early, pud) (0)
+#else
+#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
+(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
+#endif
+
+#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
+(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))
+
+#define __pte_none(early, pte) (early ? pte_none(pte) : \
+((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
+
+bool kasan_early_stage = true;
+
+/*
+ * Allocate memory for the shadow memory page tables.
+ */
+static phys_addr_t __init kasan_alloc_zeroed_page(int node)
+{
+ void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
+ if (!p)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));
+
+ return __pa(p);
+}
+
+static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
+{
+ if (__pmd_none(early, READ_ONCE(*pmdp))) {
+ phys_addr_t pte_phys = early ?
+ __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
+ pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
+ }
+
+ return pte_offset_kernel(pmdp, addr);
+}
+
+static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
+{
+ if (__pud_none(early, READ_ONCE(*pudp))) {
+ phys_addr_t pmd_phys = early ?
+ __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
+ pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
+ }
+
+ return pmd_offset(pudp, addr);
+}
+
+static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
+{
+ if (__p4d_none(early, READ_ONCE(*p4dp))) {
+ phys_addr_t pud_phys = early ?
+ __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
+ p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
+ }
+
+ return pud_offset(p4dp, addr);
+}
+
+static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
+
+ do {
+ phys_addr_t page_phys = early ?
+ __pa_symbol(kasan_early_shadow_page)
+ : kasan_alloc_zeroed_page(node);
+ next = addr + PAGE_SIZE;
+ set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
+ } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
+}
+
+static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
+
+ do {
+ next = pmd_addr_end(addr, end);
+ kasan_pte_populate(pmdp, addr, next, node, early);
+ } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
+}
+
+static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);
+
+ do {
+ next = pud_addr_end(addr, end);
+ kasan_pmd_populate(pudp, addr, next, node, early);
+ } while (pudp++, addr = next, addr != end);
+}
+
+static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+
+ do {
+ next = p4d_addr_end(addr, end);
+ kasan_pud_populate(p4dp, addr, next, node, early);
+ } while (p4dp++, addr = next, addr != end);
+}
+
+static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
+ int node, bool early)
+{
+ unsigned long next;
+ pgd_t *pgdp;
+
+ pgdp = pgd_offset_k(addr);
+
+ do {
+ next = pgd_addr_end(addr, end);
+ kasan_p4d_populate(pgdp, addr, next, node, early);
+ } while (pgdp++, addr = next, addr != end);
+
+}
+
+/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
+static void __init kasan_map_populate(unsigned long start, unsigned long end,
+ int node)
+{
+ kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
+}
+
+asmlinkage void __init kasan_early_init(void)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+}
+
+static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
+{
+ WRITE_ONCE(*pgdp, pgdval);
+}
+
+static void __init clear_pgds(unsigned long start, unsigned long end)
+{
+ /*
+ * Remove references to the kasan page tables from
+ * swapper_pg_dir. pgd_clear() can't be used here
+ * because it is a no-op on 2- and 3-level pagetable setups.
+ */
+ for (; start < end; start += PGDIR_SIZE)
+ kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
+}
+
+void __init kasan_init(void)
+{
+ u64 i;
+ phys_addr_t pa_start, pa_end;
+
+ /*
+ * The PGD was populated with invalid_pmd_table or invalid_pud_table
+ * in pagetable_init(), depending on how many levels of page
+ * table are in use, but we have to clear the pgd entries of the kasan
+ * shadow memory, because those pgd values are non-zero.
+ * Otherwise pgd_none() would be false and the formal populate
+ * afterwards would not create any new pgd at all.
+ */
+ memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
+ csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
+ local_flush_tlb_all();
+
+ clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ /* Maps everything to a single page of zeroes */
+ kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);
+
+ kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)KFENCE_AREA_END));
+
+ kasan_early_stage = false;
+
+ /* Populate the linear mapping */
+ for_each_mem_range(i, &pa_start, &pa_end) {
+ void *start = (void *)phys_to_virt(pa_start);
+ void *end = (void *)phys_to_virt(pa_end);
+
+ if (start >= end)
+ break;
+
+ kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
+ (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
+ }
+
+ /* Populate modules mapping */
+ kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
+ (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
+ /*
+ * KASAN may reuse the contents of kasan_early_shadow_pte directly, so we
+ * should make sure that it maps the zero page read-only.
+ */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte(&kasan_early_shadow_pte[i],
+ pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));
+
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+ csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
+ local_flush_tlb_all();
+
+ /* At this point kasan is fully initialized. Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KernelAddressSanitizer initialized.\n");
+}
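
The population loops above walk the shadow region derived from each mapped range. For orientation, the generic shadow translation in include/linux/kasan.h is the simple scale-and-offset shown below; LoongArch overrides it in asm/kasan.h (added elsewhere in this series) to handle its separate address windows, so treat this only as the baseline formula:

/* Generic KASAN mapping: one shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT bytes. */
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
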
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index fbe1a4856fc4..a9630a81b38a 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -8,12 +8,11 @@
#include <linux/mm.h>
#include <linux/mman.h>
-unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
-EXPORT_SYMBOL(shm_align_mask);
+#define SHM_ALIGN_MASK (SHMLBA - 1)
-#define COLOUR_ALIGN(addr, pgoff) \
- ((((addr) + shm_align_mask) & ~shm_align_mask) + \
- (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK) \
+ + (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))
enum mmap_allocation_direction {UP, DOWN};
@@ -40,7 +39,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
- ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+ ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK))
return -EINVAL;
return addr;
}
@@ -63,7 +62,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
info.length = len;
- info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
if (dir == DOWN) {
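
COLOUR_ALIGN() rounds the candidate address up to the next SHMLBA boundary and then re-applies the colour bits of the file offset, so shared mappings of the same object keep a consistent cache colour. The same computation written out as plain C (SHMLBA's value is architecture-defined and not shown in this hunk):

/* Round addr up to a SHMLBA boundary, then add the colour of pgoff. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK;

	return base + ((pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK);
}
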
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index b14343e211b6..71d0539e2d0b 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -9,6 +9,18 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
+struct page *dmw_virt_to_page(unsigned long kaddr)
+{
+ return pfn_to_page(virt_to_pfn(kaddr));
+}
+EXPORT_SYMBOL_GPL(dmw_virt_to_page);
+
+struct page *tlb_virt_to_page(unsigned long kaddr)
+{
+ return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
+}
+EXPORT_SYMBOL_GPL(tlb_virt_to_page);
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *init, *ret = NULL;
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index a50308b6fc25..5c97d1463328 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
# Objects to go into the VDSO.
+KASAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index e23d06b51a20..2a60d7a72f1f 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -37,6 +37,7 @@ extern int split_tlb;
extern int dcache_stride;
extern int icache_stride;
extern struct pdc_cache_info cache_info;
+extern struct pdc_btlb_info btlb_info;
void parisc_setup_cache_timing(void);
#define pdtlb(sr, addr) asm volatile("pdtlb 0(%%sr%0,%1)" \
diff --git a/arch/parisc/include/asm/mckinley.h b/arch/parisc/include/asm/mckinley.h
deleted file mode 100644
index 1314390b9034..000000000000
--- a/arch/parisc/include/asm/mckinley.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_PARISC_MCKINLEY_H
-#define ASM_PARISC_MCKINLEY_H
-
-/* declared in arch/parisc/kernel/setup.c */
-extern struct proc_dir_entry * proc_mckinley_root;
-
-#endif /*ASM_PARISC_MCKINLEY_H*/
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index 269b9a159f01..5d2d9737e579 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -44,10 +44,11 @@ int pdc_model_capabilities(unsigned long *capabilities);
int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no);
int pdc_cache_info(struct pdc_cache_info *cache);
int pdc_spaceid_bits(unsigned long *space_bits);
-#ifndef CONFIG_PA20
int pdc_btlb_info(struct pdc_btlb_info *btlb);
+int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+ unsigned long entry_info, unsigned long slot);
+int pdc_btlb_purge_all(void);
int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
-#endif /* !CONFIG_PA20 */
int pdc_pim_toc11(struct pdc_toc_pim_11 *ret);
int pdc_pim_toc20(struct pdc_toc_pim_20 *ret);
int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index d77c43d32974..ff6cbdb6903b 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -310,6 +310,7 @@ extern void do_syscall_trace_exit(struct pt_regs *);
struct seq_file;
extern void early_trap_init(void);
extern void collect_boot_cpu_data(void);
+extern void btlb_init_per_cpu(void);
extern int show_cpuinfo (struct seq_file *m, void *v);
/* driver code in driver/parisc */
diff --git a/arch/parisc/include/asm/ropes.h b/arch/parisc/include/asm/ropes.h
index fd96706c7234..e2d2d7e9bfde 100644
--- a/arch/parisc/include/asm/ropes.h
+++ b/arch/parisc/include/asm/ropes.h
@@ -29,7 +29,7 @@
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
- u64 *pdir_base; /* physical base address */
+ __le64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
@@ -86,6 +86,9 @@ struct sba_device {
struct ioc ioc[MAX_IOC];
};
+/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
+extern struct sba_device *sba_list;
+
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804
@@ -110,7 +113,7 @@ static inline int IS_PLUTO(struct parisc_device *d) {
#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
-#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL
+#define SBA_AGPGART_COOKIE (__force __le64) 0x0000badbadc0ffeeULL
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
diff --git a/arch/parisc/include/asm/shmparam.h b/arch/parisc/include/asm/shmparam.h
index 74f74e4d35b7..5a95b0f62b87 100644
--- a/arch/parisc/include/asm/shmparam.h
+++ b/arch/parisc/include/asm/shmparam.h
@@ -2,6 +2,21 @@
#ifndef _ASMPARISC_SHMPARAM_H
#define _ASMPARISC_SHMPARAM_H
+/*
+ * PA-RISC uses virtually indexed & physically tagged (VIPT) caches,
+ * which have strict requirements when two pages mapping the same physical
+ * address are accessed through different mappings. Read the section
+ * "Address Aliasing" in the arch docs for more detail:
+ * PA-RISC 1.1 (page 3-6):
+ * https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf
+ * PA-RISC 2.0 (page F-5):
+ * https://parisc.wiki.kernel.org/images-parisc/7/73/Parisc2.0.pdf
+ *
+ * For Linux we allow kernel and userspace to map pages on page size
+ * granularity (SHMLBA) but have to ensure that, if two pages are
+ * mapped to the same physical address, the virtual and physical
+ * addresses modulo SHM_COLOUR are identical.
+ */
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#define SHM_COLOUR 0x00400000 /* shared mappings colouring */
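
In other words, two mappings of the same physical page are only legal when their virtual addresses agree modulo SHM_COLOUR. Since SHM_COLOUR is a power of two, that congruence check reduces to a mask, as in this illustrative sketch (the real enforcement is spread across the mmap and cache-flush paths):

/* Two virtual aliases of one physical page must share a cache colour. */
static bool aliases_are_congruent(unsigned long vaddr1, unsigned long vaddr2)
{
	return ((vaddr1 ^ vaddr2) & (SHM_COLOUR - 1)) == 0;
}
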
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 94652e13c260..757816a7bd4b 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -275,6 +275,8 @@ int main(void)
* and kernel data on physical huge pages */
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#elif !defined(CONFIG_64BIT)
+ DEFINE(HUGEPAGE_SIZE, 4*1024*1024);
#else
DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
#endif
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 442109a48940..268d90a9325b 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -58,7 +58,7 @@ int pa_serialize_tlb_flushes __ro_after_init;
struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info __ro_after_init;
+struct pdc_btlb_info btlb_info __ro_after_init;
#endif
DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
@@ -264,12 +264,6 @@ parisc_cache_init(void)
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
-#ifndef CONFIG_PA20
- if (pdc_btlb_info(&btlb_info) < 0) {
- memset(&btlb_info, 0, sizeof btlb_info);
- }
-#endif
-
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 8f4b77648491..ed8b75948061 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -925,9 +925,9 @@ static __init void qemu_header(void)
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);
+ #define p ((unsigned long *)&boot_cpu_data.pdc.model)
pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
- #define p ((unsigned long *)&boot_cpu_data.pdc.model)
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 8f37e75f2fb9..81078abec521 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -687,7 +687,6 @@ int pdc_spaceid_bits(unsigned long *space_bits)
return retval;
}
-#ifndef CONFIG_PA20
/**
* pdc_btlb_info - Return block TLB information.
* @btlb: The return buffer.
@@ -696,18 +695,51 @@ int pdc_spaceid_bits(unsigned long *space_bits)
*/
int pdc_btlb_info(struct pdc_btlb_info *btlb)
{
- int retval;
+ int retval;
unsigned long flags;
- spin_lock_irqsave(&pdc_lock, flags);
- retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
- memcpy(btlb, pdc_result, sizeof(*btlb));
- spin_unlock_irqrestore(&pdc_lock, flags);
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
- if(retval < 0) {
- btlb->max_size = 0;
- }
- return retval;
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
+ memcpy(btlb, pdc_result, sizeof(*btlb));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ if(retval < 0) {
+ btlb->max_size = 0;
+ }
+ return retval;
+}
+
+int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+ unsigned long entry_info, unsigned long slot)
+{
+ int retval;
+ unsigned long flags;
+
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32),
+ (unsigned long) vpage, physpage, len, entry_info, slot);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
+}
+
+int pdc_btlb_purge_all(void)
+{
+ int retval;
+ unsigned long flags;
+
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
}
/**
@@ -728,6 +760,9 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address,
int retval;
unsigned long flags;
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
spin_lock_irqsave(&pdc_lock, flags);
memcpy(pdc_result2, mod_path, sizeof(*mod_path));
retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),
@@ -737,7 +772,6 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address,
return retval;
}
-#endif /* !CONFIG_PA20 */
/**
* pdc_lan_station_id - Get the LAN address.
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index fd15fd4bbb61..a171bf3c6b31 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -180,10 +180,10 @@ $pgt_fill_loop:
std %dp,0x18(%r10)
#endif
-#ifdef CONFIG_64BIT
- /* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
+#ifdef CONFIG_64BIT
+ /* Get PDCE_PROC for monarch CPU. */
ldw MEM_PDC_LO(%r0),%r3
ldw MEM_PDC_HI(%r0),%r10
depd %r10, 31, 32, %r3 /* move to upper word */
@@ -269,7 +269,17 @@ stext_pdc_ret:
tovirt_r1 %r6
mtctl %r6,%cr30 /* restore task thread info */
#endif
-
+
+#ifndef CONFIG_64BIT
+ /* clear all BTLBs */
+ ldi PDC_BLOCK_TLB,%arg0
+ load32 PA(stext_pdc_btlb_ret), %rp
+ ldw MEM_PDC_LO(%r0),%r3
+ bv (%r3)
+ ldi PDC_BTLB_PURGE_ALL,%arg1
+stext_pdc_btlb_ret:
+#endif
+
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
mtsp %r0,%sr1
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 12c4d4104ade..2f81bfd4f15e 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -365,7 +365,7 @@ union irq_stack_union {
volatile unsigned int lock[1];
};
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index a0e2d37c5b3b..1fc89fa2c2d2 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -368,6 +368,8 @@ int init_per_cpu(int cpunum)
/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
init_percpu_prof(cpunum);
+ btlb_init_per_cpu();
+
return ret;
}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 1aaa2ca09800..58694d1989c2 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -154,6 +154,7 @@ SECTIONS
}
/* End of data section */
+ . = ALIGN(PAGE_SIZE);
_edata = .;
/* BSS */
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index a088c243edea..a2a3e89f2d9a 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -32,6 +32,7 @@
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
+#include <asm/asm-offsets.h>
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
@@ -720,6 +721,77 @@ void __init paging_init(void)
parisc_bootmem_free();
}
+static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
+ unsigned long entry_info)
+{
+ const int slot_max = btlb_info.fixed_range_info.num_comb;
+ int min_num_pages = btlb_info.min_size;
+ unsigned long size;
+
+ /* map at minimum 4 pages */
+ if (min_num_pages < 4)
+ min_num_pages = 4;
+
+ size = HUGEPAGE_SIZE;
+ while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
+ /* starting address must have same alignment as size! */
+ /* if correctly aligned and fits in double size, increase */
+ if (((start & (2 * size - 1)) == 0) &&
+ (end - start) >= (2 * size)) {
+ size <<= 1;
+ continue;
+ }
+ /* if current size alignment is too big, try smaller size */
+ if ((start & (size - 1)) != 0) {
+ size >>= 1;
+ continue;
+ }
+ if ((end - start) >= size) {
+ if ((size >> PAGE_SHIFT) >= min_num_pages)
+ pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
+ size >> PAGE_SHIFT, entry_info, *slot);
+ (*slot)++;
+ start += size;
+ continue;
+ }
+ size /= 2;
+ continue;
+ }
+}
+
+void btlb_init_per_cpu(void)
+{
+ unsigned long s, t, e;
+ int slot;
+
+ /* BTLBs are not available on 64-bit CPUs */
+ if (IS_ENABLED(CONFIG_PA20))
+ return;
+ else if (pdc_btlb_info(&btlb_info) < 0) {
+ memset(&btlb_info, 0, sizeof btlb_info);
+ }
+
+ /* insert BTLBs for code and data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_stext);
+ e = (uintptr_t) dereference_function_descriptor(&_etext);
+ t = (uintptr_t) dereference_function_descriptor(&_sdata);
+ BUG_ON(t != e);
+
+ /* code segments */
+ slot = 0;
+ alloc_btlb(s, e, &slot, 0x13800000);
+
+ /* sanity check */
+ t = (uintptr_t) dereference_function_descriptor(&_edata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_start);
+ BUG_ON(t != e);
+
+ /* data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_sdata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
+ alloc_btlb(s, e, &slot, 0x11800000);
+}
+
#ifdef CONFIG_PA20
/*
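
alloc_btlb() is a greedy fit: starting from HUGEPAGE_SIZE it doubles the block while the current start stays aligned and the remainder still fits, and halves it whenever the start's alignment (or the remaining length) is too small for the current block, inserting a BTLB entry each time a block lands. A worked trace under assumed numbers (4 KiB pages, a text range of 0x00110000..0x00400000, purely illustrative):

/*
 *   insert  64 KiB @ 0x00110000   size halved until the start is aligned
 *   insert 128 KiB @ 0x00120000   start is now 128 KiB aligned
 *   insert 256 KiB @ 0x00140000
 *   insert 512 KiB @ 0x00180000
 *   insert   2 MiB @ 0x00200000   reaches the end; 5 BTLB slots used
 */
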
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 5138dce1a0b4..d607ab0f7c6d 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -273,7 +273,14 @@ config RISCV_DMA_NONCOHERENT
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB
- select DMA_DIRECT_REMAP
+ select DMA_DIRECT_REMAP if MMU
+
+config RISCV_NONSTANDARD_CACHE_OPS
+ bool
+ depends on RISCV_DMA_NONCOHERENT
+ help
+ This enables function pointer support for non-standard noncoherent
+ systems to handle cache management.
config AS_HAS_INSN
def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero)
@@ -713,6 +720,25 @@ config RELOCATABLE
If unsure, say N.
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+ select RELOCATABLE
+ depends on MMU && 64BIT && !XIP_KERNEL
+ help
+ Randomizes the virtual address at which the kernel image is
+ loaded, as a security feature that deters exploit attempts
+ relying on knowledge of the location of kernel internals.
+
+ It is the bootloader's job to provide entropy, by passing a
+ random u64 value in /chosen/kaslr-seed at kernel entry.
+
+ When booting via the UEFI stub, it will invoke the firmware's
+ EFI_RNG_PROTOCOL implementation (if available) to supply entropy
+ to the kernel proper. In addition, it will randomise the physical
+ location of the kernel Image.
+
+ If unsure, say N.
+
endmenu # "Kernel features"
menu "Boot options"
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index 0c8f4652cd82..566bcefeab50 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -1,5 +1,26 @@
menu "CPU errata selection"
+config ERRATA_ANDES
+ bool "Andes AX45MP errata"
+ depends on RISCV_ALTERNATIVE && RISCV_SBI
+ help
+ All Andes errata Kconfig depend on this Kconfig. Disabling
+ this Kconfig will disable all Andes errata. Please say "Y"
+ here if your platform uses Andes CPU cores.
+
+ Otherwise, please say "N" here to avoid unnecessary overhead.
+
+config ERRATA_ANDES_CMO
+ bool "Apply Andes cache management errata"
+ depends on ERRATA_ANDES && ARCH_R9A07G043
+ select RISCV_DMA_NONCOHERENT
+ default y
+ help
+ This will apply the cache management errata to handle non-standard
+ cache maintenance operations on non-coherent Andes cores.
+
+ If you don't know what to do here, say "Y".
+
config ERRATA_SIFIVE
bool "SiFive errata"
depends on RISCV_ALTERNATIVE
diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile
index 7b2637c8c332..8a2739485123 100644
--- a/arch/riscv/errata/Makefile
+++ b/arch/riscv/errata/Makefile
@@ -2,5 +2,6 @@ ifdef CONFIG_RELOCATABLE
KBUILD_CFLAGS += -fno-pie
endif
+obj-$(CONFIG_ERRATA_ANDES) += andes/
obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
obj-$(CONFIG_ERRATA_THEAD) += thead/
diff --git a/arch/riscv/errata/andes/Makefile b/arch/riscv/errata/andes/Makefile
new file mode 100644
index 000000000000..2d644e19caef
--- /dev/null
+++ b/arch/riscv/errata/andes/Makefile
@@ -0,0 +1 @@
+obj-y += errata.o
diff --git a/arch/riscv/errata/andes/errata.c b/arch/riscv/errata/andes/errata.c
new file mode 100644
index 000000000000..197db68cc8da
--- /dev/null
+++ b/arch/riscv/errata/andes/errata.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Erratas to be applied for Andes CPU cores
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation.
+ *
+ * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ */
+
+#include <linux/memory.h>
+#include <linux/module.h>
+
+#include <asm/alternative.h>
+#include <asm/cacheflush.h>
+#include <asm/errata_list.h>
+#include <asm/patch.h>
+#include <asm/processor.h>
+#include <asm/sbi.h>
+#include <asm/vendorid_list.h>
+
+#define ANDESTECH_AX45MP_MARCHID 0x8000000000008a45UL
+#define ANDESTECH_AX45MP_MIMPID 0x500UL
+#define ANDESTECH_SBI_EXT_ANDES 0x0900031E
+
+#define ANDES_SBI_EXT_IOCP_SW_WORKAROUND 1
+
+static long ax45mp_iocp_sw_workaround(void)
+{
+ struct sbiret ret;
+
+ /*
+ * The ANDES_SBI_EXT_IOCP_SW_WORKAROUND SBI extension checks whether the IOCP
+ * is missing and the cache is controllable; only then will CMO be applied.
+ */
+ ret = sbi_ecall(ANDESTECH_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND,
+ 0, 0, 0, 0, 0, 0);
+
+ return ret.error ? 0 : ret.value;
+}
+
+static bool errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
+{
+ if (!IS_ENABLED(CONFIG_ERRATA_ANDES_CMO))
+ return false;
+
+ if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID)
+ return false;
+
+ if (!ax45mp_iocp_sw_workaround())
+ return false;
+
+ /* Set this just to make core cbo code happy */
+ riscv_cbom_block_size = 1;
+ riscv_noncoherent_supported();
+
+ return true;
+}
+
+void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage)
+{
+ errata_probe_iocp(stage, archid, impid);
+
+ /* we have nothing to patch here at the moment, so just return */
+}
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index be84b14f0118..0554ed4bf087 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -120,11 +120,3 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
local_flush_icache_all();
}
-
-void thead_feature_probe_func(unsigned int cpu,
- unsigned long archid,
- unsigned long impid)
-{
- if ((archid == 0) && (impid == 0))
- per_cpu(misaligned_access_speed, cpu) = RISCV_HWPROBE_MISALIGNED_FAST;
-}
diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h
index 6a41537826a7..3c2b59b25017 100644
--- a/arch/riscv/include/asm/alternative.h
+++ b/arch/riscv/include/asm/alternative.h
@@ -30,7 +30,6 @@
#define ALT_OLD_PTR(a) __ALT_PTR(a, old_offset)
#define ALT_ALT_PTR(a) __ALT_PTR(a, alt_offset)
-void probe_vendor_features(unsigned int cpu);
void __init apply_boot_alternatives(void);
void __init apply_early_boot_alternatives(void);
void apply_module_alternatives(void *start, size_t length);
@@ -46,6 +45,9 @@ struct alt_entry {
u32 patch_id; /* The patch ID (erratum ID or cpufeature ID) */
};
+void andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage);
@@ -53,15 +55,11 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage);
-void thead_feature_probe_func(unsigned int cpu, unsigned long archid,
- unsigned long impid);
-
void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned int stage);
#else /* CONFIG_RISCV_ALTERNATIVE */
-static inline void probe_vendor_features(unsigned int cpu) { }
static inline void apply_boot_alternatives(void) { }
static inline void apply_early_boot_alternatives(void) { }
static inline void apply_module_alternatives(void *start, size_t length) { }
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 23fed53b8815..d0345bd659c9 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -30,4 +30,6 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
+void check_unaligned_access(int cpu);
+
#endif
diff --git a/arch/riscv/include/asm/dma-noncoherent.h b/arch/riscv/include/asm/dma-noncoherent.h
new file mode 100644
index 000000000000..312cfa0858fb
--- /dev/null
+++ b/arch/riscv/include/asm/dma-noncoherent.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+
+#ifndef __ASM_DMA_NONCOHERENT_H
+#define __ASM_DMA_NONCOHERENT_H
+
+#include <linux/dma-direct.h>
+
+/*
+ * struct riscv_nonstd_cache_ops - Structure for non-standard CMO function pointers
+ *
+ * @wback: Function pointer for cache writeback
+ * @inv: Function pointer for invalidating cache
+ * @wback_inv: Function pointer for flushing the cache (writeback + invalidating)
+ */
+struct riscv_nonstd_cache_ops {
+ void (*wback)(phys_addr_t paddr, size_t size);
+ void (*inv)(phys_addr_t paddr, size_t size);
+ void (*wback_inv)(phys_addr_t paddr, size_t size);
+};
+
+extern struct riscv_nonstd_cache_ops noncoherent_cache_ops;
+
+void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops);
+
+#endif /* __ASM_DMA_NONCOHERENT_H */
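
A platform whose cache cannot be managed with the standard Zicbom instructions fills in this structure and hands it to riscv_noncoherent_register_cache_ops() from its driver's probe path. A minimal sketch (callback names are made up; the real consumer in this series is the Andes AX45MP cache driver, which is not shown in this section):

#include <asm/dma-noncoherent.h>

static void my_cache_wback(phys_addr_t paddr, size_t size)
{
	/* write dirty lines covering [paddr, paddr + size) back to memory */
}

static void my_cache_inv(phys_addr_t paddr, size_t size)
{
	/* drop (invalidate) lines covering [paddr, paddr + size) */
}

static void my_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	my_cache_wback(paddr, size);
	my_cache_inv(paddr, size);
}

static const struct riscv_nonstd_cache_ops my_cache_ops = {
	.wback		= my_cache_wback,
	.inv		= my_cache_inv,
	.wback_inv	= my_cache_wback_inv,
};

/* Called from the platform driver's probe path. */
static void my_cache_register(void)
{
	riscv_noncoherent_register_cache_ops(&my_cache_ops);
}
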
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 8a6a128ec57f..46a355913b27 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -45,4 +45,6 @@ void arch_efi_call_virt_teardown(void);
unsigned long stext_offset(void);
+void efi_icache_sync(unsigned long start, unsigned long end);
+
#endif /* _ASM_EFI_H */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index fb1a810f3d8c..e2ecd01bfac7 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -11,6 +11,11 @@
#include <asm/hwcap.h>
#include <asm/vendorid_list.h>
+#ifdef CONFIG_ERRATA_ANDES
+#define ERRATA_ANDESTECH_NO_IOCP 0
+#define ERRATA_ANDESTECH_NUMBER 1
+#endif
+
#ifdef CONFIG_ERRATA_SIFIVE
#define ERRATA_SIFIVE_CIP_453 0
#define ERRATA_SIFIVE_CIP_1200 1
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index b55ba20903ec..5488ecc337b6 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -106,6 +106,7 @@ typedef struct page *pgtable_t;
struct kernel_mapping {
unsigned long page_offset;
unsigned long virt_addr;
+ unsigned long virt_offset;
uintptr_t phys_addr;
uintptr_t size;
/* Offset between linear mapping virtual address and kernel load address */
@@ -185,6 +186,8 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
+unsigned long kaslr_offset(void);
+
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) ({ \
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
index 63c98833d510..e88b52d39eac 100644
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/patch.h
@@ -7,6 +7,7 @@
#define _ASM_RISCV_PATCH_H
int patch_text_nosync(void *addr, const void *insns, size_t len);
+int patch_text_set_nosync(void *addr, u8 c, size_t len);
int patch_text(void *addr, u32 *insns, int ninsns);
extern int riscv_patch_in_stop_machine;
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index cb89af3f0704..e55407ace0c3 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -5,6 +5,7 @@
#ifndef ASM_VENDOR_LIST_H
#define ASM_VENDOR_LIST_H
+#define ANDESTECH_VENDOR_ID 0x31e
#define SIFIVE_VENDOR_ID 0x489
#define THEAD_VENDOR_ID 0x5b7
diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h
index 6d2d9afaabea..a38268b19c3d 100644
--- a/arch/riscv/include/uapi/asm/ptrace.h
+++ b/arch/riscv/include/uapi/asm/ptrace.h
@@ -108,13 +108,18 @@ struct __riscv_v_ext_state {
* In signal handler, datap will be set a correct user stack offset
* and vector registers will be copied to the address of datap
* pointer.
- *
- * In ptrace syscall, datap will be set to zero and the vector
- * registers will be copied to the address right after this
- * structure.
*/
};
+struct __riscv_v_regset_state {
+ unsigned long vstart;
+ unsigned long vl;
+ unsigned long vtype;
+ unsigned long vcsr;
+ unsigned long vlenb;
+ char vreg[];
+};
+
/*
* According to spec: The number of bits in a single vector register,
* VLEN >= ELEN, which must be a power of 2, and must be no greater than
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 6ac56af42f4a..95cf25d48405 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -38,6 +38,7 @@ extra-y += vmlinux.lds
obj-y += head.o
obj-y += soc.o
obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
+obj-y += copy-unaligned.o
obj-y += cpu.o
obj-y += cpufeature.o
obj-y += entry.o
diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c
index 6b75788c18e6..319a1da0358b 100644
--- a/arch/riscv/kernel/alternative.c
+++ b/arch/riscv/kernel/alternative.c
@@ -27,8 +27,6 @@ struct cpu_manufacturer_info_t {
void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage);
- void (*feature_probe_func)(unsigned int cpu, unsigned long archid,
- unsigned long impid);
};
static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
@@ -43,8 +41,12 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info
cpu_mfr_info->imp_id = sbi_get_mimpid();
#endif
- cpu_mfr_info->feature_probe_func = NULL;
switch (cpu_mfr_info->vendor_id) {
+#ifdef CONFIG_ERRATA_ANDES
+ case ANDESTECH_VENDOR_ID:
+ cpu_mfr_info->patch_func = andes_errata_patch_func;
+ break;
+#endif
#ifdef CONFIG_ERRATA_SIFIVE
case SIFIVE_VENDOR_ID:
cpu_mfr_info->patch_func = sifive_errata_patch_func;
@@ -53,7 +55,6 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info
#ifdef CONFIG_ERRATA_THEAD
case THEAD_VENDOR_ID:
cpu_mfr_info->patch_func = thead_errata_patch_func;
- cpu_mfr_info->feature_probe_func = thead_feature_probe_func;
break;
#endif
default:
@@ -143,20 +144,6 @@ void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
}
}
-/* Called on each CPU as it starts */
-void probe_vendor_features(unsigned int cpu)
-{
- struct cpu_manufacturer_info_t cpu_mfr_info;
-
- riscv_fill_cpu_mfr_info(&cpu_mfr_info);
- if (!cpu_mfr_info.feature_probe_func)
- return;
-
- cpu_mfr_info.feature_probe_func(cpu,
- cpu_mfr_info.arch_id,
- cpu_mfr_info.imp_id);
-}
-
/*
* This is called very early in the boot process (directly after we run
* a feature detect on the boot CPU). No need to worry about other CPUs
@@ -211,7 +198,6 @@ void __init apply_boot_alternatives(void)
/* If called on non-boot cpu things could go wrong */
WARN_ON(smp_processor_id() != 0);
- probe_vendor_features(0);
_apply_alternatives((struct alt_entry *)__alt_start,
(struct alt_entry *)__alt_end,
RISCV_ALTERNATIVES_BOOT);
diff --git a/arch/riscv/kernel/copy-unaligned.S b/arch/riscv/kernel/copy-unaligned.S
new file mode 100644
index 000000000000..cfdecfbaad62
--- /dev/null
+++ b/arch/riscv/kernel/copy-unaligned.S
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023 Rivos Inc. */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+ .text
+
+/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using word loads and stores. */
+/* Note: The size is truncated to a multiple of 8 * SZREG */
+ENTRY(__riscv_copy_words_unaligned)
+ andi a4, a2, ~((8*SZREG)-1)
+ beqz a4, 2f
+ add a3, a1, a4
+1:
+ REG_L a4, 0(a1)
+ REG_L a5, SZREG(a1)
+ REG_L a6, 2*SZREG(a1)
+ REG_L a7, 3*SZREG(a1)
+ REG_L t0, 4*SZREG(a1)
+ REG_L t1, 5*SZREG(a1)
+ REG_L t2, 6*SZREG(a1)
+ REG_L t3, 7*SZREG(a1)
+ REG_S a4, 0(a0)
+ REG_S a5, SZREG(a0)
+ REG_S a6, 2*SZREG(a0)
+ REG_S a7, 3*SZREG(a0)
+ REG_S t0, 4*SZREG(a0)
+ REG_S t1, 5*SZREG(a0)
+ REG_S t2, 6*SZREG(a0)
+ REG_S t3, 7*SZREG(a0)
+ addi a0, a0, 8*SZREG
+ addi a1, a1, 8*SZREG
+ bltu a1, a3, 1b
+
+2:
+ ret
+END(__riscv_copy_words_unaligned)
+
+/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using only byte accesses. */
+/* Note: The size is truncated to a multiple of 8 */
+ENTRY(__riscv_copy_bytes_unaligned)
+ andi a4, a2, ~(8-1)
+ beqz a4, 2f
+ add a3, a1, a4
+1:
+ lb a4, 0(a1)
+ lb a5, 1(a1)
+ lb a6, 2(a1)
+ lb a7, 3(a1)
+ lb t0, 4(a1)
+ lb t1, 5(a1)
+ lb t2, 6(a1)
+ lb t3, 7(a1)
+ sb a4, 0(a0)
+ sb a5, 1(a0)
+ sb a6, 2(a0)
+ sb a7, 3(a0)
+ sb t0, 4(a0)
+ sb t1, 5(a0)
+ sb t2, 6(a0)
+ sb t3, 7(a0)
+ addi a0, a0, 8
+ addi a1, a1, 8
+ bltu a1, a3, 1b
+
+2:
+ ret
+END(__riscv_copy_bytes_unaligned)
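For readers who don't speak RISC-V assembly, here is a rough C analog of the two routines above (an illustrative sketch, not part of the patch): both truncate the size to a multiple of eight transfers and make no attempt to align either buffer; the asm issues genuinely misaligned word loads/stores, for which a memcpy() of one long is the closest portable C expression.

#include <stddef.h>
#include <string.h>

/*
 * Rough C analog of __riscv_copy_words_unaligned(): the size is truncated
 * to a multiple of 8 * sizeof(long) and the copy proceeds in register-wide
 * transfers without aligning either pointer.
 */
static void copy_words_unaligned(void *dst, const void *src, size_t size)
{
	size_t n = size & ~(8 * sizeof(long) - 1);

	for (size_t off = 0; off < n; off += sizeof(long)) {
		long tmp;

		memcpy(&tmp, (const char *)src + off, sizeof(tmp));
		memcpy((char *)dst + off, &tmp, sizeof(tmp));
	}
}

/*
 * Rough C analog of __riscv_copy_bytes_unaligned(): size truncated to a
 * multiple of 8, one byte per transfer.
 */
static void copy_bytes_unaligned(void *dst, const void *src, size_t size)
{
	size_t n = size & ~(size_t)7;

	for (size_t off = 0; off < n; off++)
		((unsigned char *)dst)[off] = ((const unsigned char *)src)[off];
}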
diff --git a/arch/riscv/kernel/copy-unaligned.h b/arch/riscv/kernel/copy-unaligned.h
new file mode 100644
index 000000000000..e3d70d35b708
--- /dev/null
+++ b/arch/riscv/kernel/copy-unaligned.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos, Inc.
+ */
+#ifndef __RISCV_KERNEL_COPY_UNALIGNED_H
+#define __RISCV_KERNEL_COPY_UNALIGNED_H
+
+#include <linux/types.h>
+
+void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size);
+void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size);
+
+#endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index ef7b4fd9e876..1cfbba65d11a 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -18,12 +18,19 @@
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwcap.h>
+#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/vector.h>
+#include "copy-unaligned.h"
+
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
+#define MISALIGNED_ACCESS_JIFFIES_LG2 1
+#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
+
unsigned long elf_hwcap __read_mostly;
/* Host ISA bitmap */
@@ -549,6 +556,103 @@ unsigned long riscv_get_elf_hwcap(void)
return hwcap;
}
+void check_unaligned_access(int cpu)
+{
+ u64 start_cycles, end_cycles;
+ u64 word_cycles;
+ u64 byte_cycles;
+ int ratio;
+ unsigned long start_jiffies, now;
+ struct page *page;
+ void *dst;
+ void *src;
+ long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+
+ page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
+ if (!page) {
+ pr_warn("Can't alloc pages to measure memcpy performance");
+ return;
+ }
+
+ /* Make an unaligned destination buffer. */
+ dst = (void *)((unsigned long)page_address(page) | 0x1);
+ /* Unalign src as well, but differently (off by 1 + 2 = 3). */
+ src = dst + (MISALIGNED_BUFFER_SIZE / 2);
+ src += 2;
+ word_cycles = -1ULL;
+ /* Do a warmup. */
+ __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ preempt_disable();
+ start_jiffies = jiffies;
+ while ((now = jiffies) == start_jiffies)
+ cpu_relax();
+
+ /*
+ * For a fixed amount of time, repeatedly try the function, and take
+ * the best time in cycles as the measurement.
+ */
+ while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+ start_cycles = get_cycles64();
+		/* Ensure the CSR read can't be reordered with respect to the copy. */
+ mb();
+ __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ /* Ensure the copy ends before the end time is snapped. */
+ mb();
+ end_cycles = get_cycles64();
+ if ((end_cycles - start_cycles) < word_cycles)
+ word_cycles = end_cycles - start_cycles;
+ }
+
+ byte_cycles = -1ULL;
+ __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ start_jiffies = jiffies;
+ while ((now = jiffies) == start_jiffies)
+ cpu_relax();
+
+ while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+ start_cycles = get_cycles64();
+ mb();
+ __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ mb();
+ end_cycles = get_cycles64();
+ if ((end_cycles - start_cycles) < byte_cycles)
+ byte_cycles = end_cycles - start_cycles;
+ }
+
+ preempt_enable();
+
+ /* Don't divide by zero. */
+ if (!word_cycles || !byte_cycles) {
+ pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
+ cpu);
+
+ goto out;
+ }
+
+ if (word_cycles < byte_cycles)
+ speed = RISCV_HWPROBE_MISALIGNED_FAST;
+
+ ratio = div_u64((byte_cycles * 100), word_cycles);
+ pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
+ cpu,
+ ratio / 100,
+ ratio % 100,
+ (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
+
+ per_cpu(misaligned_access_speed, cpu) = speed;
+
+out:
+ __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+}
+
+static int check_unaligned_access_boot_cpu(void)
+{
+ check_unaligned_access(0);
+ return 0;
+}
+
+arch_initcall(check_unaligned_access_boot_cpu);
+
#ifdef CONFIG_RISCV_ALTERNATIVE
/*
* Alternative patch sites consider 48 bits when determining when to patch
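The probe's methodology — warm up, take the best of many runs inside a fixed time window, then compare word-wise against byte-wise copies — can be mimicked in user space. A hedged sketch (not the kernel code; it reuses the copy_words_unaligned()/copy_bytes_unaligned() analogs sketched earlier and substitutes clock_gettime() for the cycle CSR):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define BUF_SIZE	0x4000
#define COPY_SIZE	((BUF_SIZE / 2) - 0x80)
#define RUNS		1000

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Best-of-N timing of one copy routine, mirroring the kernel's
 * "take the minimum over a fixed window" approach. */
static uint64_t best_time(void (*copy)(void *, const void *, size_t),
			  void *dst, const void *src)
{
	uint64_t best = UINT64_MAX;

	copy(dst, src, COPY_SIZE);		/* warm-up pass */
	for (int i = 0; i < RUNS; i++) {
		uint64_t t0 = now_ns();

		copy(dst, src, COPY_SIZE);

		uint64_t t1 = now_ns();

		if (t1 - t0 < best)
			best = t1 - t0;
	}
	return best;
}

int main(void)
{
	char *buf = malloc(BUF_SIZE);
	void *dst = buf + 1;			/* deliberately misaligned */
	void *src = buf + BUF_SIZE / 2 + 3;	/* misaligned differently */
	uint64_t words = best_time(copy_words_unaligned, dst, src);
	uint64_t bytes = best_time(copy_bytes_unaligned, dst, src);

	printf("byte/word ratio: %.2f -> unaligned accesses look %s\n",
	       (double)bytes / words, words < bytes ? "fast" : "slow");
	free(buf);
	return 0;
}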
diff --git a/arch/riscv/kernel/image-vars.h b/arch/riscv/kernel/image-vars.h
index 15616155008c..ea1a10355ce9 100644
--- a/arch/riscv/kernel/image-vars.h
+++ b/arch/riscv/kernel/image-vars.h
@@ -27,6 +27,7 @@ __efistub__start = _start;
__efistub__start_kernel = _start_kernel;
__efistub__end = _end;
__efistub__edata = _edata;
+__efistub___init_text_end = __init_text_end;
__efistub_screen_info = screen_info;
#endif
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 575e71d6c8ae..13ee7bf589a1 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -6,6 +6,7 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
@@ -53,13 +54,52 @@ static void patch_unmap(int fixmap)
}
NOKPROBE_SYMBOL(patch_unmap);
-static int patch_insn_write(void *addr, const void *insn, size_t len)
+static int __patch_insn_set(void *addr, u8 c, size_t len)
+{
+ void *waddr = addr;
+ bool across_pages = (((uintptr_t)addr & ~PAGE_MASK) + len) > PAGE_SIZE;
+
+ /*
+ * Only two pages can be mapped at a time for writing.
+ */
+ if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
+ return -EINVAL;
+ /*
+	 * The text_mutex is expected to be held before reaching here, so
+	 * no extra locking is needed to keep the operation safe across
+	 * cores.
+ */
+ lockdep_assert_held(&text_mutex);
+
+ if (across_pages)
+ patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
+
+ waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+ memset(waddr, c, len);
+
+ patch_unmap(FIX_TEXT_POKE0);
+
+ if (across_pages)
+ patch_unmap(FIX_TEXT_POKE1);
+
+ return 0;
+}
+NOKPROBE_SYMBOL(__patch_insn_set);
+
+static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
void *waddr = addr;
bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
int ret;
/*
+ * Only two pages can be mapped at a time for writing.
+ */
+ if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
+ return -EINVAL;
+
+ /*
	 * The text_mutex is expected to be held before reaching here, so
	 * no extra locking is needed to keep the operation safe across
	 * cores.
@@ -74,7 +114,7 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
lockdep_assert_held(&text_mutex);
if (across_pages)
- patch_map(addr + len, FIX_TEXT_POKE1);
+ patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
waddr = patch_map(addr, FIX_TEXT_POKE0);
@@ -87,15 +127,79 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
return ret;
}
-NOKPROBE_SYMBOL(patch_insn_write);
+NOKPROBE_SYMBOL(__patch_insn_write);
#else
-static int patch_insn_write(void *addr, const void *insn, size_t len)
+static int __patch_insn_set(void *addr, u8 c, size_t len)
+{
+ memset(addr, c, len);
+
+ return 0;
+}
+NOKPROBE_SYMBOL(__patch_insn_set);
+
+static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
return copy_to_kernel_nofault(addr, insn, len);
}
-NOKPROBE_SYMBOL(patch_insn_write);
+NOKPROBE_SYMBOL(__patch_insn_write);
#endif /* CONFIG_MMU */
+static int patch_insn_set(void *addr, u8 c, size_t len)
+{
+ size_t patched = 0;
+ size_t size;
+ int ret = 0;
+
+ /*
+ * __patch_insn_set() can only work on 2 pages at a time so call it in a
+ * loop with len <= 2 * PAGE_SIZE.
+ */
+ while (patched < len && !ret) {
+ size = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(addr + patched), len - patched);
+ ret = __patch_insn_set(addr + patched, c, size);
+
+ patched += size;
+ }
+
+ return ret;
+}
+NOKPROBE_SYMBOL(patch_insn_set);
+
+int patch_text_set_nosync(void *addr, u8 c, size_t len)
+{
+ u32 *tp = addr;
+ int ret;
+
+ ret = patch_insn_set(tp, c, len);
+
+ if (!ret)
+ flush_icache_range((uintptr_t)tp, (uintptr_t)tp + len);
+
+ return ret;
+}
+NOKPROBE_SYMBOL(patch_text_set_nosync);
+
+static int patch_insn_write(void *addr, const void *insn, size_t len)
+{
+ size_t patched = 0;
+ size_t size;
+ int ret = 0;
+
+ /*
+ * Copy the instructions to the destination address, two pages at a time
+ * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
+ */
+ while (patched < len && !ret) {
+ size = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(addr + patched), len - patched);
+ ret = __patch_insn_write(addr + patched, insn + patched, size);
+
+ patched += size;
+ }
+
+ return ret;
+}
+NOKPROBE_SYMBOL(patch_insn_write);
+
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
u32 *tp = addr;
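The chunking arithmetic in patch_insn_set()/patch_insn_write() is easy to misread: the first chunk may be shorter so that every call satisfies len + offset_in_page(addr) <= 2 * PAGE_SIZE. A small user-space sketch with hypothetical values (PAGE_SIZE assumed to be 4 KiB) that prints the resulting chunk sizes:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed for illustration */
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
#define min(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long addr = 0x10000f00UL;	/* hypothetical patch target */
	size_t len = 3 * PAGE_SIZE;		/* larger than one fixmap window */
	size_t patched = 0;

	while (patched < len) {
		size_t size = min(PAGE_SIZE * 2 - offset_in_page(addr + patched),
				  len - patched);

		/* each chunk satisfies size + offset_in_page(...) <= 2 * PAGE_SIZE */
		printf("chunk at +0x%zx, %zu bytes\n", patched, size);
		patched += size;
	}
	return 0;
}

With these values the loop emits a first chunk of 4352 bytes (ending exactly on a page boundary) followed by one of 7936 bytes, so no single __patch_insn_*() call ever needs more than the two fixmap slots.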
diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile
index 7b593d44c712..07915dc9279e 100644
--- a/arch/riscv/kernel/pi/Makefile
+++ b/arch/riscv/kernel/pi/Makefile
@@ -35,5 +35,5 @@ $(obj)/string.o: $(srctree)/lib/string.c FORCE
$(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
$(call if_changed_rule,cc_o_c)
-obj-y := cmdline_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
+obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c
index 05652d13c746..68e786c84c94 100644
--- a/arch/riscv/kernel/pi/cmdline_early.c
+++ b/arch/riscv/kernel/pi/cmdline_early.c
@@ -14,6 +14,7 @@ static char early_cmdline[COMMAND_LINE_SIZE];
* LLVM complain because the function is actually unused in this file).
*/
u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
+bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
static char *get_early_cmdline(uintptr_t dtb_pa)
{
@@ -60,3 +61,15 @@ u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa)
return match_noXlvl(cmdline);
}
+
+static bool match_nokaslr(char *cmdline)
+{
+ return strstr(cmdline, "nokaslr");
+}
+
+bool set_nokaslr_from_cmdline(uintptr_t dtb_pa)
+{
+ char *cmdline = get_early_cmdline(dtb_pa);
+
+ return match_nokaslr(cmdline);
+}
diff --git a/arch/riscv/kernel/pi/fdt_early.c b/arch/riscv/kernel/pi/fdt_early.c
new file mode 100644
index 000000000000..899610e042ab
--- /dev/null
+++ b/arch/riscv/kernel/pi/fdt_early.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/libfdt.h>
+
+/*
+ * Declare the functions that are exported (but prefixed) here so that LLVM
+ * does not complain it lacks the 'static' keyword (which, if added, makes
+ * LLVM complain because the function is actually unused in this file).
+ */
+u64 get_kaslr_seed(uintptr_t dtb_pa);
+
+u64 get_kaslr_seed(uintptr_t dtb_pa)
+{
+ int node, len;
+ fdt64_t *prop;
+ u64 ret;
+
+ node = fdt_path_offset((void *)dtb_pa, "/chosen");
+ if (node < 0)
+ return 0;
+
+ prop = fdt_getprop_w((void *)dtb_pa, node, "kaslr-seed", &len);
+ if (!prop || len != sizeof(u64))
+ return 0;
+
+ ret = fdt64_to_cpu(*prop);
+ *prop = 0;
+ return ret;
+}
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 487303e3ef22..2afe460de16a 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -25,6 +25,9 @@ enum riscv_regset {
#ifdef CONFIG_FPU
REGSET_F,
#endif
+#ifdef CONFIG_RISCV_ISA_V
+ REGSET_V,
+#endif
};
static int riscv_gpr_get(struct task_struct *target,
@@ -81,6 +84,71 @@ static int riscv_fpr_set(struct task_struct *target,
}
#endif
+#ifdef CONFIG_RISCV_ISA_V
+static int riscv_vr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct __riscv_v_ext_state *vstate = &target->thread.vstate;
+ struct __riscv_v_regset_state ptrace_vstate;
+
+ if (!riscv_v_vstate_query(task_pt_regs(target)))
+ return -EINVAL;
+
+ /*
+	 * Ensure the vector registers have been saved to memory before
+ * copying them to membuf.
+ */
+ if (target == current)
+ riscv_v_vstate_save(current, task_pt_regs(current));
+
+ ptrace_vstate.vstart = vstate->vstart;
+ ptrace_vstate.vl = vstate->vl;
+ ptrace_vstate.vtype = vstate->vtype;
+ ptrace_vstate.vcsr = vstate->vcsr;
+ ptrace_vstate.vlenb = vstate->vlenb;
+
+ /* Copy vector header from vstate. */
+ membuf_write(&to, &ptrace_vstate, sizeof(struct __riscv_v_regset_state));
+
+ /* Copy all the vector registers from vstate. */
+ return membuf_write(&to, vstate->datap, riscv_v_vsize);
+}
+
+static int riscv_vr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct __riscv_v_ext_state *vstate = &target->thread.vstate;
+ struct __riscv_v_regset_state ptrace_vstate;
+
+ if (!riscv_v_vstate_query(task_pt_regs(target)))
+ return -EINVAL;
+
+ /* Copy rest of the vstate except datap */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ptrace_vstate, 0,
+ sizeof(struct __riscv_v_regset_state));
+ if (unlikely(ret))
+ return ret;
+
+ if (vstate->vlenb != ptrace_vstate.vlenb)
+ return -EINVAL;
+
+ vstate->vstart = ptrace_vstate.vstart;
+ vstate->vl = ptrace_vstate.vl;
+ vstate->vtype = ptrace_vstate.vtype;
+ vstate->vcsr = ptrace_vstate.vcsr;
+
+ /* Copy all the vector registers. */
+ pos = 0;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate->datap,
+ 0, riscv_v_vsize);
+ return ret;
+}
+#endif
+
static const struct user_regset riscv_user_regset[] = {
[REGSET_X] = {
.core_note_type = NT_PRSTATUS,
@@ -100,6 +168,17 @@ static const struct user_regset riscv_user_regset[] = {
.set = riscv_fpr_set,
},
#endif
+#ifdef CONFIG_RISCV_ISA_V
+ [REGSET_V] = {
+ .core_note_type = NT_RISCV_VECTOR,
+ .align = 16,
+ .n = ((32 * RISCV_MAX_VLENB) +
+ sizeof(struct __riscv_v_regset_state)) / sizeof(__u32),
+ .size = sizeof(__u32),
+ .regset_get = riscv_vr_get,
+ .set = riscv_vr_set,
+ },
+#endif
};
static const struct user_regset_view riscv_user_native_view = {
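A debugger consumes the new regset through PTRACE_GETREGSET: a struct __riscv_v_regset_state header followed by 32 registers of vlenb bytes each. A hedged user-space sketch, assuming NT_RISCV_VECTOR is exported via <linux/elf.h>, the header struct via <asm/ptrace.h>, and an 8 KiB upper bound on VLENB (hypothetical sizing, not taken from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <linux/elf.h>		/* NT_RISCV_VECTOR, assumed exported */
#include <asm/ptrace.h>		/* struct __riscv_v_regset_state, assumed */

/* Read the vector regset of an already-stopped tracee. */
static int read_vector_regs(pid_t pid)
{
	size_t bufsz = sizeof(struct __riscv_v_regset_state) + 32 * 8192;
	void *buf = malloc(bufsz);
	struct iovec iov = { .iov_base = buf, .iov_len = bufsz };
	struct __riscv_v_regset_state *hdr = buf;

	if (!buf)
		return -1;

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_RISCV_VECTOR, &iov) < 0) {
		perror("PTRACE_GETREGSET");	/* e.g. EINVAL if V is unused */
		free(buf);
		return -1;
	}

	/* The kernel shrinks iov_len to header + 32 * vlenb bytes. */
	printf("vl=%lu vtype=%#lx vlenb=%lu (%zu bytes of register data)\n",
	       hdr->vl, hdr->vtype, hdr->vlenb, iov.iov_len - sizeof(*hdr));
	free(buf);
	return 0;
}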
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 32c2e1eb71bd..e600aab116a4 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -21,6 +21,7 @@
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/crash_dump.h>
+#include <linux/panic_notifier.h>
#include <asm/acpi.h>
#include <asm/alternative.h>
@@ -347,3 +348,27 @@ void free_initmem(void)
free_initmem_default(POISON_FREE_INITMEM);
}
+
+static int dump_kernel_offset(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
+ kernel_map.virt_offset,
+ KERNEL_LINK_ADDR);
+
+ return 0;
+}
+
+static struct notifier_block kernel_offset_notifier = {
+ .notifier_call = dump_kernel_offset
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_offset_notifier);
+
+ return 0;
+}
+device_initcall(register_kernel_offset_dumper);
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index f4d6acb38dd0..1b8da4e40a4d 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -26,6 +26,7 @@
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/cpu_ops.h>
+#include <asm/cpufeature.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
@@ -245,7 +246,7 @@ asmlinkage __visible void smp_callin(void)
numa_add_cpu(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
- probe_vendor_features(curr_cpuid);
+ check_unaligned_access(curr_cpuid);
if (has_vector()) {
if (riscv_v_setup_vsize())
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index 7270b4d8c05b..b76e7e192eb1 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -9,26 +9,93 @@
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
+#include <asm/dma-noncoherent.h>
static bool noncoherent_supported __ro_after_init;
int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
EXPORT_SYMBOL_GPL(dma_cache_alignment);
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
+ .wback = NULL,
+ .inv = NULL,
+ .wback_inv = NULL,
+};
+
+static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
+{
+ void *vaddr = phys_to_virt(paddr);
+
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (unlikely(noncoherent_cache_ops.wback)) {
+ noncoherent_cache_ops.wback(paddr, size);
+ return;
+ }
+#endif
+ ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
+{
+ void *vaddr = phys_to_virt(paddr);
+
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (unlikely(noncoherent_cache_ops.inv)) {
+ noncoherent_cache_ops.inv(paddr, size);
+ return;
+ }
+#endif
+
+ ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
void *vaddr = phys_to_virt(paddr);
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (unlikely(noncoherent_cache_ops.wback_inv)) {
+ noncoherent_cache_ops.wback_inv(paddr, size);
+ return;
+ }
+#endif
+
+ ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline bool arch_sync_dma_clean_before_fromdevice(void)
+{
+ return true;
+}
+
+static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
+{
+ return true;
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
switch (dir) {
case DMA_TO_DEVICE:
- ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+ arch_dma_cache_wback(paddr, size);
break;
+
case DMA_FROM_DEVICE:
- ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
- break;
+ if (!arch_sync_dma_clean_before_fromdevice()) {
+ arch_dma_cache_inv(paddr, size);
+ break;
+ }
+ fallthrough;
+
case DMA_BIDIRECTIONAL:
- ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+ /* Skip the invalidate here if it's done later */
+ if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
+ arch_sync_dma_cpu_needs_post_dma_flush())
+ arch_dma_cache_wback(paddr, size);
+ else
+ arch_dma_cache_wback_inv(paddr, size);
break;
+
default:
break;
}
@@ -37,15 +104,17 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
- void *vaddr = phys_to_virt(paddr);
-
switch (dir) {
case DMA_TO_DEVICE:
break;
+
case DMA_FROM_DEVICE:
case DMA_BIDIRECTIONAL:
- ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+		/* The FROM_DEVICE invalidate is only needed to guard against speculative CPU prefetch */
+ if (arch_sync_dma_cpu_needs_post_dma_flush())
+ arch_dma_cache_inv(paddr, size);
break;
+
default:
break;
}
@@ -55,6 +124,13 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
{
void *flush_addr = page_address(page);
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (unlikely(noncoherent_cache_ops.wback_inv)) {
+ noncoherent_cache_ops.wback_inv(page_to_phys(page), size);
+ return;
+ }
+#endif
+
ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}
@@ -86,3 +162,12 @@ void __init riscv_set_dma_cache_alignment(void)
if (!noncoherent_supported)
dma_cache_alignment = 1;
}
+
+void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
+{
+ if (!ops)
+ return;
+
+ noncoherent_cache_ops = *ops;
+}
+EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 943c18d6ef4d..0798bd861dcb 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1014,11 +1014,45 @@ static void __init pt_ops_set_late(void)
#endif
}
+#ifdef CONFIG_RANDOMIZE_BASE
+extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
+extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
+
+static int __init print_nokaslr(char *p)
+{
+ pr_info("Disabled KASLR");
+ return 0;
+}
+early_param("nokaslr", print_nokaslr);
+
+unsigned long kaslr_offset(void)
+{
+ return kernel_map.virt_offset;
+}
+#endif
+
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
- kernel_map.virt_addr = KERNEL_LINK_ADDR;
+#ifdef CONFIG_RANDOMIZE_BASE
+ if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
+ u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
+ u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
+ u32 nr_pos;
+
+ /*
+ * Compute the number of positions available: we are limited
+ * by the early page table that only has one PUD and we must
+ * be aligned on PMD_SIZE.
+ */
+ nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;
+
+ kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
+ }
+#endif
+
+ kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
#ifdef CONFIG_XIP_KERNEL
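The virtual KASLR offset is derived purely from the seed and the room left in the single early PUD mapping. A worked sketch with the usual RV64 4K-page constants (PMD_SIZE = 2 MiB, PUD_SIZE = 1 GiB, assumed here) and a hypothetical kernel image size:

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE	(2ULL << 20)	/* 2 MiB, assumed RV64 with 4K pages */
#define PUD_SIZE	(1ULL << 30)	/* 1 GiB */

int main(void)
{
	uint64_t kaslr_seed = 0x9e3779b97f4a7c15ull;	/* hypothetical seed */
	uint64_t kernel_size = 40ULL << 20;		/* hypothetical ~40 MiB image */
	uint64_t nr_pos, virt_offset;

	/* Positions available inside the one early PUD, PMD-aligned. */
	nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;	/* (1024 - 40) / 2 = 492 */
	virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;

	printf("nr_pos=%llu offset=%#llx\n",
	       (unsigned long long)nr_pos, (unsigned long long)virt_offset);
	return 0;
}

The offset is always a multiple of PMD_SIZE, so the randomized mapping still uses the same early page-table layout; only the link-time base is shifted.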
diff --git a/arch/riscv/mm/pmem.c b/arch/riscv/mm/pmem.c
index 089df92ae876..c5fc5ec96f6d 100644
--- a/arch/riscv/mm/pmem.c
+++ b/arch/riscv/mm/pmem.c
@@ -7,15 +7,28 @@
#include <linux/libnvdimm.h>
#include <asm/cacheflush.h>
+#include <asm/dma-noncoherent.h>
void arch_wb_cache_pmem(void *addr, size_t size)
{
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (unlikely(noncoherent_cache_ops.wback)) {
+ noncoherent_cache_ops.wback(virt_to_phys(addr), size);
+ return;
+ }
+#endif
ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
void arch_invalidate_pmem(void *addr, size_t size)
{
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (unlikely(noncoherent_cache_ops.inv)) {
+ noncoherent_cache_ops.inv(virt_to_phys(addr), size);
+ return;
+ }
+#endif
ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index d21c6c92a683..a5ce1ab76ece 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -68,6 +68,7 @@ static inline bool is_creg(u8 reg)
struct rv_jit_context {
struct bpf_prog *prog;
u16 *insns; /* RV insns */
+ u16 *ro_insns;
int ninsns;
int prologue_len;
int epilogue_offset;
@@ -85,7 +86,9 @@ static inline int ninsns_rvoff(int ninsns)
struct rv_jit_data {
struct bpf_binary_header *header;
+ struct bpf_binary_header *ro_header;
u8 *image;
+ u8 *ro_image;
struct rv_jit_context ctx;
};
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 8423f4ddf8f5..ecd3ae6f4116 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -144,7 +144,11 @@ static bool in_auipc_jalr_range(s64 val)
/* Emit fixed-length instructions for address */
static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
{
- u64 ip = (u64)(ctx->insns + ctx->ninsns);
+ /*
+	 * Use ro_insns (RX) to calculate the offset, as that is the memory
+	 * region the BPF program will ultimately run from.
+ */
+ u64 ip = (u64)(ctx->ro_insns + ctx->ninsns);
s64 off = addr - ip;
s64 upper = (off + (1 << 11)) >> 12;
s64 lower = off & 0xfff;
@@ -464,8 +468,12 @@ static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
s64 off = 0;
u64 ip;
- if (addr && ctx->insns) {
- ip = (u64)(long)(ctx->insns + ctx->ninsns);
+ if (addr && ctx->insns && ctx->ro_insns) {
+ /*
+		 * Use ro_insns (RX) to calculate the offset, as that is the
+		 * memory region the BPF program will ultimately run from.
+ */
+ ip = (u64)(long)(ctx->ro_insns + ctx->ninsns);
off = addr - ip;
}
@@ -578,9 +586,10 @@ static int add_exception_handler(const struct bpf_insn *insn,
{
struct exception_table_entry *ex;
unsigned long pc;
- off_t offset;
+ off_t ins_offset;
+ off_t fixup_offset;
- if (!ctx->insns || !ctx->prog->aux->extable ||
+ if (!ctx->insns || !ctx->ro_insns || !ctx->prog->aux->extable ||
(BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX))
return 0;
@@ -594,12 +603,17 @@ static int add_exception_handler(const struct bpf_insn *insn,
return -EINVAL;
ex = &ctx->prog->aux->extable[ctx->nexentries];
- pc = (unsigned long)&ctx->insns[ctx->ninsns - insn_len];
+ pc = (unsigned long)&ctx->ro_insns[ctx->ninsns - insn_len];
- offset = pc - (long)&ex->insn;
- if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+ /*
+ * This is the relative offset of the instruction that may fault from
+ * the exception table itself. This will be written to the exception
+ * table and if this instruction faults, the destination register will
+ * be set to '0' and the execution will jump to the next instruction.
+ */
+ ins_offset = pc - (long)&ex->insn;
+ if (WARN_ON_ONCE(ins_offset >= 0 || ins_offset < INT_MIN))
return -ERANGE;
- ex->insn = offset;
/*
* Since the extable follows the program, the fixup offset is always
@@ -608,12 +622,25 @@ static int add_exception_handler(const struct bpf_insn *insn,
* bits. We don't need to worry about buildtime or runtime sort
* modifying the upper bits because the table is already sorted, and
* isn't part of the main exception table.
+ *
+	 * The fixup_offset points at the instruction following the one that
+	 * may fault; execution jumps there after the fault has been handled.
*/
- offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16));
- if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
+ fixup_offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16));
+ if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
return -ERANGE;
- ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
+ /*
+	 * The offsets above were calculated using the RO buffer, but the
+	 * writes themselves must go through the RW buffer, so switch ex to
+	 * its RW alias before writing.
+ */
+ ex = (void *)ctx->insns + ((void *)ex - (void *)ctx->ro_insns);
+
+ ex->insn = ins_offset;
+
+ ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ex->type = EX_TYPE_BPF;
@@ -1007,6 +1034,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
ctx.ninsns = 0;
ctx.insns = NULL;
+ ctx.ro_insns = NULL;
ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
if (ret < 0)
return ret;
@@ -1015,7 +1043,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
return -EFBIG;
ctx.ninsns = 0;
+ /*
+	 * bpf_int_jit_compile() uses an RW buffer (ctx.insns) to write the
+	 * JITed instructions and later copies it to an RX region (ctx.ro_insns).
+ * It also uses ctx.ro_insns to calculate offsets for jumps etc. As the
+ * trampoline image uses the same memory area for writing and execution,
+ * both ctx.insns and ctx.ro_insns can be set to image.
+ */
ctx.insns = image;
+ ctx.ro_insns = image;
ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
if (ret < 0)
return ret;
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index 7a26a3e1c73c..7b70ccb7fec3 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -8,6 +8,8 @@
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/memory.h>
+#include <asm/patch.h>
#include "bpf_jit.h"
/* Number of iterations to try until offsets converge. */
@@ -117,16 +119,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
sizeof(struct exception_table_entry);
prog_size = sizeof(*ctx->insns) * ctx->ninsns;
- jit_data->header =
- bpf_jit_binary_alloc(prog_size + extable_size,
- &jit_data->image,
- sizeof(u32),
- bpf_fill_ill_insns);
- if (!jit_data->header) {
+ jit_data->ro_header =
+ bpf_jit_binary_pack_alloc(prog_size + extable_size,
+ &jit_data->ro_image, sizeof(u32),
+ &jit_data->header, &jit_data->image,
+ bpf_fill_ill_insns);
+ if (!jit_data->ro_header) {
prog = orig_prog;
goto out_offset;
}
+ /*
+	 * Use the image (RW) for writing the JITed instructions, but also keep
+	 * the ro_image (RX) for calculating offsets within the image. The RW
+	 * image will later be copied to the RX image, from which the program
+	 * will run; bpf_jit_binary_pack_finalize() performs this copy as the
+	 * final step.
+ */
+ ctx->ro_insns = (u16 *)jit_data->ro_image;
ctx->insns = (u16 *)jit_data->image;
/*
* Now, when the image is allocated, the image can
@@ -138,14 +148,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
if (i == NR_JIT_ITERATIONS) {
pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
- if (jit_data->header)
- bpf_jit_binary_free(jit_data->header);
prog = orig_prog;
- goto out_offset;
+ goto out_free_hdr;
}
if (extable_size)
- prog->aux->extable = (void *)ctx->insns + prog_size;
+ prog->aux->extable = (void *)ctx->ro_insns + prog_size;
skip_init_ctx:
pass++;
@@ -154,23 +162,33 @@ skip_init_ctx:
bpf_jit_build_prologue(ctx);
if (build_body(ctx, extra_pass, NULL)) {
- bpf_jit_binary_free(jit_data->header);
prog = orig_prog;
- goto out_offset;
+ goto out_free_hdr;
}
bpf_jit_build_epilogue(ctx);
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);
- prog->bpf_func = (void *)ctx->insns;
+ prog->bpf_func = (void *)ctx->ro_insns;
prog->jited = 1;
prog->jited_len = prog_size;
- bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
-
if (!prog->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(jit_data->header);
+ if (WARN_ON(bpf_jit_binary_pack_finalize(prog, jit_data->ro_header,
+ jit_data->header))) {
+ /* ro_header has been freed */
+ jit_data->ro_header = NULL;
+ prog = orig_prog;
+ goto out_offset;
+ }
+ /*
+ * The instructions have now been copied to the ROX region from
+ * where they will execute.
+ * Write any modified data cache blocks out to memory and
+ * invalidate the corresponding blocks in the instruction cache.
+ */
+ bpf_flush_icache(jit_data->ro_header, ctx->ro_insns + ctx->ninsns);
for (i = 0; i < prog->len; i++)
ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
bpf_prog_fill_jited_linfo(prog, ctx->offset);
@@ -185,6 +203,14 @@ out:
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
+
+out_free_hdr:
+ if (jit_data->header) {
+ bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
+ sizeof(jit_data->header->size));
+ bpf_jit_binary_pack_free(jit_data->ro_header, jit_data->header);
+ }
+ goto out_offset;
}
u64 bpf_jit_alloc_exec_limit(void)
@@ -204,3 +230,51 @@ void bpf_jit_free_exec(void *addr)
{
return vfree(addr);
}
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+ int ret;
+
+ mutex_lock(&text_mutex);
+ ret = patch_text_nosync(dst, src, len);
+ mutex_unlock(&text_mutex);
+
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ return dst;
+}
+
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+ int ret;
+
+ mutex_lock(&text_mutex);
+ ret = patch_text_set_nosync(dst, 0, len);
+ mutex_unlock(&text_mutex);
+
+ return ret;
+}
+
+void bpf_jit_free(struct bpf_prog *prog)
+{
+ if (prog->jited) {
+ struct rv_jit_data *jit_data = prog->aux->jit_data;
+ struct bpf_binary_header *hdr;
+
+ /*
+ * If we fail the final pass of JIT (from jit_subprogs),
+ * the program may not be finalized yet. Call finalize here
+ * before freeing it.
+ */
+ if (jit_data) {
+ bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, jit_data->header);
+ kfree(jit_data);
+ }
+ hdr = bpf_jit_binary_pack_hdr(prog);
+ bpf_jit_binary_pack_free(hdr, NULL);
+ WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
+ }
+
+ bpf_prog_unlock_free(prog);
+}
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 151792162152..645cccf3da88 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -531,7 +531,7 @@ static int __init ap325rxa_devices_setup(void)
device_initialize(&ap325rxa_ceu_device.dev);
dma_declare_coherent_memory(&ap325rxa_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(&ap325rxa_ceu_device);
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 310513646c9b..3be293335de5 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -1454,15 +1454,13 @@ static int __init arch_setup(void)
device_initialize(&ecovec_ceu_devices[0]->dev);
dma_declare_coherent_memory(&ecovec_ceu_devices[0]->dev,
ceu0_dma_membase, ceu0_dma_membase,
- ceu0_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ecovec_ceu_devices[0]);
device_initialize(&ecovec_ceu_devices[1]->dev);
dma_declare_coherent_memory(&ecovec_ceu_devices[1]->dev,
ceu1_dma_membase, ceu1_dma_membase,
- ceu1_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ecovec_ceu_devices[1]);
gpiod_add_lookup_table(&cn12_power_gpiod_table);
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index a18e80394aed..6b775eae85c0 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -603,7 +603,7 @@ static int __init kfr2r09_devices_setup(void)
device_initialize(&kfr2r09_ceu_device.dev);
dma_declare_coherent_memory(&kfr2r09_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(&kfr2r09_ceu_device);
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index f60061283c48..773ee767d0c4 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -604,7 +604,7 @@ static int __init migor_devices_setup(void)
device_initialize(&migor_ceu_device.dev);
dma_declare_coherent_memory(&migor_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(&migor_ceu_device);
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index b60a2626e18b..6495f9354065 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -940,15 +940,13 @@ static int __init devices_setup(void)
device_initialize(&ms7724se_ceu_devices[0]->dev);
dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev,
ceu0_dma_membase, ceu0_dma_membase,
- ceu0_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ms7724se_ceu_devices[0]);
device_initialize(&ms7724se_ceu_devices[1]->dev);
dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev,
ceu1_dma_membase, ceu1_dma_membase,
- ceu1_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ms7724se_ceu_devices[1]);
return platform_add_devices(ms7724se_devices,
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
index c95f48ff3f6f..6ecba5f521eb 100644
--- a/arch/sh/drivers/push-switch.c
+++ b/arch/sh/drivers/push-switch.c
@@ -101,8 +101,8 @@ static int switch_drv_remove(struct platform_device *pdev)
device_remove_file(&pdev->dev, &dev_attr_switch);
platform_set_drvdata(pdev, NULL);
- flush_work(&psw->work);
timer_shutdown_sync(&psw->debounce);
+ flush_work(&psw->work);
free_irq(irq, pdev);
kfree(psw);
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 4d349986f76a..8250f0f59c2b 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -6474,8 +6474,18 @@ void spr_uncore_cpu_init(void)
type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
if (type) {
+ /*
+		 * The value from the discovery table (stored in type->num_boxes
+		 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of
+		 * a firmware bug. Use the value from SPR_MSR_UNC_CBO_CONFIG to replace it.
+ */
rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
- type->num_boxes = num_cbo;
+ /*
+ * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
+ * the EMR XCC. Don't let the value from the MSR replace the existing value.
+ */
+ if (num_cbo)
+ type->num_boxes = num_cbo;
}
spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
new file mode 100644
index 000000000000..12b820259b9f
--- /dev/null
+++ b/arch/x86/include/asm/mman.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+#define arch_calc_vm_prot_bits(prot, key) ( \
+ ((key) & 0x1 ? VM_PKEY_BIT0 : 0) | \
+ ((key) & 0x2 ? VM_PKEY_BIT1 : 0) | \
+ ((key) & 0x4 ? VM_PKEY_BIT2 : 0) | \
+ ((key) & 0x8 ? VM_PKEY_BIT3 : 0))
+#endif
+
+#include <uapi/asm/mman.h>
+
+#endif /* __ASM_MMAN_H__ */
diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h
index 8148bdddbd2c..46cdc941f958 100644
--- a/arch/x86/include/uapi/asm/mman.h
+++ b/arch/x86/include/uapi/asm/mman.h
@@ -5,14 +5,6 @@
#define MAP_32BIT 0x40 /* only give out 32bit addresses */
#define MAP_ABOVE4G 0x80 /* only map above 4GB */
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-#define arch_calc_vm_prot_bits(prot, key) ( \
- ((key) & 0x1 ? VM_PKEY_BIT0 : 0) | \
- ((key) & 0x2 ? VM_PKEY_BIT1 : 0) | \
- ((key) & 0x4 ? VM_PKEY_BIT2 : 0) | \
- ((key) & 0x8 ? VM_PKEY_BIT3 : 0))
-#endif
-
/* Flags for map_shadow_stack(2) */
#define SHADOW_STACK_SET_TOKEN (1ULL << 0) /* Set up a restore token in the shadow stack */
diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c
index c3e37eaec8ec..7aaa3652e31d 100644
--- a/arch/x86/kernel/cpu/sgx/virt.c
+++ b/arch/x86/kernel/cpu/sgx/virt.c
@@ -204,6 +204,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
continue;
xa_erase(&vepc->page_array, index);
+ cond_resched();
}
/*
@@ -222,6 +223,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
list_add_tail(&epc_page->list, &secs_pages);
xa_erase(&vepc->page_array, index);
+ cond_resched();
}
/*
@@ -243,6 +245,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
if (sgx_vepc_free_page(epc_page))
list_add_tail(&epc_page->list, &secs_pages);
+ cond_resched();
}
if (!list_empty(&secs_pages))
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index d7667a29acf3..4e45ff44aa07 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1250,7 +1250,7 @@ bool smp_park_other_cpus_in_init(void)
if (this_cpu)
return false;
- for_each_present_cpu(cpu) {
+ for_each_cpu_and(cpu, &cpus_booted_once_mask, cpu_present_mask) {
if (cpu == this_cpu)
continue;
apicid = apic->cpu_present_to_apicid(cpu);
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 83d41c2601d7..f15fb71f280e 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -156,7 +156,7 @@ SECTIONS
ALIGN_ENTRY_TEXT_END
*(.gnu.warning)
- } :text =0xcccc
+ } :text = 0xcccccccc
/* End of text section, which should occupy whole number of pages */
_etext = .;
diff --git a/block/blk-map.c b/block/blk-map.c
index 44d74a30ddac..8584babf3ea0 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -315,12 +315,11 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
n = bytes;
if (!bio_add_hw_page(rq->q, bio, page, n, offs,
- max_sectors, &same_page)) {
- if (same_page)
- bio_release_page(bio, page);
+ max_sectors, &same_page))
break;
- }
+ if (same_page)
+ bio_release_page(bio, page);
bytes -= n;
offs = 0;
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 7397ff199d66..38a881cf97d0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -697,11 +697,41 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
return true;
}
+static unsigned int calculate_io_allowed(u32 iops_limit,
+ unsigned long jiffy_elapsed)
+{
+ unsigned int io_allowed;
+ u64 tmp;
+
+ /*
+ * jiffy_elapsed should not be a big value as minimum iops can be
+ * 1 then at max jiffy elapsed should be equivalent of 1 second as we
+ * will allow dispatch after 1 second and after that slice should
+ * have been trimmed.
+ */
+
+ tmp = (u64)iops_limit * jiffy_elapsed;
+ do_div(tmp, HZ);
+
+ if (tmp > UINT_MAX)
+ io_allowed = UINT_MAX;
+ else
+ io_allowed = tmp;
+
+ return io_allowed;
+}
+
+static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
+{
+ return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
+}
+
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
- unsigned long nr_slices, time_elapsed, io_trim;
- u64 bytes_trim, tmp;
+ unsigned long time_elapsed;
+ long long bytes_trim;
+ int io_trim;
BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
@@ -723,67 +753,38 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
- time_elapsed = jiffies - tg->slice_start[rw];
-
- nr_slices = time_elapsed / tg->td->throtl_slice;
-
- if (!nr_slices)
+ time_elapsed = rounddown(jiffies - tg->slice_start[rw],
+ tg->td->throtl_slice);
+ if (!time_elapsed)
return;
- tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
- do_div(tmp, HZ);
- bytes_trim = tmp;
- io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
- HZ;
-
- if (!bytes_trim && !io_trim)
+ bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
+ time_elapsed) +
+ tg->carryover_bytes[rw];
+ io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
+ tg->carryover_ios[rw];
+ if (bytes_trim <= 0 && io_trim <= 0)
return;
- if (tg->bytes_disp[rw] >= bytes_trim)
+ tg->carryover_bytes[rw] = 0;
+ if ((long long)tg->bytes_disp[rw] >= bytes_trim)
tg->bytes_disp[rw] -= bytes_trim;
else
tg->bytes_disp[rw] = 0;
- if (tg->io_disp[rw] >= io_trim)
+ tg->carryover_ios[rw] = 0;
+ if ((int)tg->io_disp[rw] >= io_trim)
tg->io_disp[rw] -= io_trim;
else
tg->io_disp[rw] = 0;
- tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
+ tg->slice_start[rw] += time_elapsed;
throtl_log(&tg->service_queue,
- "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
- rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
- tg->slice_start[rw], tg->slice_end[rw], jiffies);
-}
-
-static unsigned int calculate_io_allowed(u32 iops_limit,
- unsigned long jiffy_elapsed)
-{
- unsigned int io_allowed;
- u64 tmp;
-
- /*
- * jiffy_elapsed should not be a big value as minimum iops can be
- * 1 then at max jiffy elapsed should be equivalent of 1 second as we
- * will allow dispatch after 1 second and after that slice should
- * have been trimmed.
- */
-
- tmp = (u64)iops_limit * jiffy_elapsed;
- do_div(tmp, HZ);
-
- if (tmp > UINT_MAX)
- io_allowed = UINT_MAX;
- else
- io_allowed = tmp;
-
- return io_allowed;
-}
-
-static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
-{
- return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
+ "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
+ bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
+ jiffies);
}
static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
@@ -816,7 +817,7 @@ static void tg_update_carryover(struct throtl_grp *tg)
__tg_update_carryover(tg, WRITE);
/* see comments in struct throtl_grp for meaning of these fields. */
- throtl_log(&tg->service_queue, "%s: %llu %llu %u %u\n", __func__,
+ throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
}
@@ -825,7 +826,7 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
u32 iops_limit)
{
bool rw = bio_data_dir(bio);
- unsigned int io_allowed;
+ int io_allowed;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
if (iops_limit == UINT_MAX) {
@@ -838,9 +839,8 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
tg->carryover_ios[rw];
- if (tg->io_disp[rw] + 1 <= io_allowed) {
+ if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
return 0;
- }
/* Calc approx time to dispatch */
jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
@@ -851,7 +851,8 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
u64 bps_limit)
{
bool rw = bio_data_dir(bio);
- u64 bytes_allowed, extra_bytes;
+ long long bytes_allowed;
+ u64 extra_bytes;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
unsigned int bio_size = throtl_bio_data_size(bio);
@@ -869,9 +870,8 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
tg->carryover_bytes[rw];
- if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
+ if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
return 0;
- }
/* Calc approx time to dispatch */
extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
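calculate_bytes_allowed() and calculate_io_allowed() are simply limit x elapsed / HZ with overflow-safe 64-bit math. A small worked sketch (HZ assumed to be 250 for illustration):

#include <stdint.h>
#include <stdio.h>

#define HZ 250			/* assumed for illustration */

static uint64_t bytes_allowed(uint64_t bps_limit, unsigned long jiffy_elapsed)
{
	/* The kernel uses mul_u64_u64_div_u64() to avoid intermediate overflow. */
	return bps_limit * jiffy_elapsed / HZ;
}

static unsigned int io_allowed(uint32_t iops_limit, unsigned long jiffy_elapsed)
{
	uint64_t tmp = (uint64_t)iops_limit * jiffy_elapsed / HZ;

	return tmp > UINT32_MAX ? UINT32_MAX : (unsigned int)tmp;
}

int main(void)
{
	/* 1 MiB/s and 100 IOPS over half a second's worth of jiffies. */
	unsigned long elapsed = HZ / 2;

	printf("bytes allowed: %llu\n",
	       (unsigned long long)bytes_allowed(1 << 20, elapsed));	/* 524288 */
	printf("ios allowed:   %u\n", io_allowed(100, elapsed));	/* 50 */
	return 0;
}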
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index d1ccbfe9f797..bffbc9cfc8ab 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -127,8 +127,8 @@ struct throtl_grp {
* bytes/ios are waited already in previous configuration, and they will
* be used to calculate wait time under new configuration.
*/
- uint64_t carryover_bytes[2];
- unsigned int carryover_ios[2];
+ long long carryover_bytes[2];
+ int carryover_ios[2];
unsigned long last_check_time;
diff --git a/block/fops.c b/block/fops.c
index a24a624d3bf7..acff3d5d22d4 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -671,10 +671,6 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
iov_iter_truncate(from, size);
}
- ret = file_remove_privs(file);
- if (ret)
- return ret;
-
ret = file_update_time(file);
if (ret)
return ret;
diff --git a/block/ioctl.c b/block/ioctl.c
index 648670ddb164..d5f5cd61efd7 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -20,6 +20,8 @@ static int blkpg_do_ioctl(struct block_device *bdev,
struct blkpg_partition p;
long long start, length;
+ if (disk->flags & GENHD_FL_NO_PART)
+ return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 496ca02ee18f..efb66e25fa2d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -15,6 +15,8 @@ source "drivers/base/Kconfig"
source "drivers/bus/Kconfig"
+source "drivers/cache/Kconfig"
+
source "drivers/connector/Kconfig"
source "drivers/firmware/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 0957f63ecb42..1bec7819a837 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -11,6 +11,7 @@ ifdef building_out_of_srctree
MAKEFLAGS += --include-dir=$(srctree)
endif
+obj-y += cache/
obj-y += irqchip/
obj-y += bus/
@@ -45,7 +46,7 @@ obj-$(CONFIG_DMADEVICES) += dma/
# SOC specific infrastructure drivers.
obj-y += soc/
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += genpd/
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += pmdomain/
obj-y += virtio/
obj-$(CONFIG_VDPA) += vdpa/
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 831bfd2b2d39..bdddef2c59ee 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -118,8 +118,7 @@ int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size
struct vpu_jsm_msg resp;
int ret;
- if (!strncpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN - 1))
- return -ENOMEM;
+ strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e4e4175e3e83..d3f28b82c97b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4752,7 +4752,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
}
spin_unlock_irqrestore(ap->lock, flags);
- scsi_rescan_device(&(sdev->sdev_gendev));
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ap->lock, flags);
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 79ab532aabaf..6bc86106c7b2 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1557,7 +1557,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
do {
int sent;
- bvec_set_page(&bvec, page, offset, len);
+ bvec_set_page(&bvec, page, len, offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
sent = sock_sendmsg(socket, &msg);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 864013019d6b..968090935eb2 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1643,9 +1643,12 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
struct nullb_queue *nq = hctx->driver_data;
LIST_HEAD(list);
int nr = 0;
+ struct request *rq;
spin_lock(&nq->poll_lock);
list_splice_init(&nq->poll_list, &list);
+ list_for_each_entry(rq, &list, queuelist)
+ blk_mq_set_request_complete(rq);
spin_unlock(&nq->poll_lock);
while (!list_empty(&list)) {
@@ -1671,16 +1674,21 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
- pr_info("rq %p timed out\n", rq);
-
if (hctx->type == HCTX_TYPE_POLL) {
struct nullb_queue *nq = hctx->driver_data;
spin_lock(&nq->poll_lock);
+ /* The request may have completed meanwhile. */
+ if (blk_mq_request_completed(rq)) {
+ spin_unlock(&nq->poll_lock);
+ return BLK_EH_DONE;
+ }
list_del_init(&rq->queuelist);
spin_unlock(&nq->poll_lock);
}
+ pr_info("rq %p timed out\n", rq);
+
/*
* If the device is marked as blocking (i.e. memory backed or zoned
* device), the submission path may be blocked waiting for resources
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
new file mode 100644
index 000000000000..a57677f908f3
--- /dev/null
+++ b/drivers/cache/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Cache Drivers"
+
+config AX45MP_L2_CACHE
+ bool "Andes Technology AX45MP L2 Cache controller"
+ depends on RISCV_DMA_NONCOHERENT
+ select RISCV_NONSTANDARD_CACHE_OPS
+ help
+ Support for the L2 cache controller on Andes Technology AX45MP platforms.
+
+endmenu
diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile
new file mode 100644
index 000000000000..2012e7fb978d
--- /dev/null
+++ b/drivers/cache/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
new file mode 100644
index 000000000000..57186c58dc84
--- /dev/null
+++ b/drivers/cache/ax45mp_cache.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * non-coherent cache functions for Andes AX45MP
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/cacheinfo.h>
+#include <linux/dma-direction.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <asm/dma-noncoherent.h>
+
+/* L2 cache registers */
+#define AX45MP_L2C_REG_CTL_OFFSET 0x8
+
+#define AX45MP_L2C_REG_C0_CMD_OFFSET 0x40
+#define AX45MP_L2C_REG_C0_ACC_OFFSET 0x48
+#define AX45MP_L2C_REG_STATUS_OFFSET 0x80
+
+/* D-cache operation */
+#define AX45MP_CCTL_L1D_VA_INVAL 0 /* Invalidate an L1 cache entry */
+#define AX45MP_CCTL_L1D_VA_WB 1 /* Write-back an L1 cache entry */
+
+/* L2 CCTL status */
+#define AX45MP_CCTL_L2_STATUS_IDLE 0
+
+/* L2 CCTL status cores mask */
+#define AX45MP_CCTL_L2_STATUS_C0_MASK 0xf
+
+/* L2 cache operation */
+#define AX45MP_CCTL_L2_PA_INVAL 0x8 /* Invalidate an L2 cache entry */
+#define AX45MP_CCTL_L2_PA_WB 0x9 /* Write-back an L2 cache entry */
+
+#define AX45MP_L2C_REG_PER_CORE_OFFSET 0x10
+#define AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET 4
+
+#define AX45MP_L2C_REG_CN_CMD_OFFSET(n) \
+ (AX45MP_L2C_REG_C0_CMD_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
+#define AX45MP_L2C_REG_CN_ACC_OFFSET(n) \
+ (AX45MP_L2C_REG_C0_ACC_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
+#define AX45MP_CCTL_L2_STATUS_CN_MASK(n) \
+ (AX45MP_CCTL_L2_STATUS_C0_MASK << ((n) * AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET))
+
+#define AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM 0x80b
+#define AX45MP_CCTL_REG_UCCTLCOMMAND_NUM 0x80c
+
+#define AX45MP_CACHE_LINE_SIZE 64
+
+struct ax45mp_priv {
+ void __iomem *l2c_base;
+ u32 ax45mp_cache_line_size;
+};
+
+static struct ax45mp_priv ax45mp_priv;
+
+/* L2 Cache operations */
+static inline uint32_t ax45mp_cpu_l2c_get_cctl_status(void)
+{
+ return readl(ax45mp_priv.l2c_base + AX45MP_L2C_REG_STATUS_OFFSET);
+}
+
+static void ax45mp_cpu_cache_operation(unsigned long start, unsigned long end,
+ unsigned int l1_op, unsigned int l2_op)
+{
+ unsigned long line_size = ax45mp_priv.ax45mp_cache_line_size;
+ void __iomem *base = ax45mp_priv.l2c_base;
+ int mhartid = smp_processor_id();
+ unsigned long pa;
+
+ while (end > start) {
+ csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
+ csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, l1_op);
+
+ pa = virt_to_phys((void *)start);
+ writel(pa, base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid));
+ writel(l2_op, base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid));
+ while ((ax45mp_cpu_l2c_get_cctl_status() &
+ AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
+ AX45MP_CCTL_L2_STATUS_IDLE)
+ ;
+
+ start += line_size;
+ }
+}
+
+/* Write-back L1 and L2 cache entry */
+static inline void ax45mp_cpu_dcache_wb_range(unsigned long start, unsigned long end)
+{
+ ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_WB,
+ AX45MP_CCTL_L2_PA_WB);
+}
+
+/* Invalidate the L1 and L2 cache entry */
+static inline void ax45mp_cpu_dcache_inval_range(unsigned long start, unsigned long end)
+{
+ ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_INVAL,
+ AX45MP_CCTL_L2_PA_INVAL);
+}
+
+static void ax45mp_dma_cache_inv(phys_addr_t paddr, size_t size)
+{
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+ unsigned long end = start + size;
+ unsigned long line_size;
+ unsigned long flags;
+
+ if (unlikely(start == end))
+ return;
+
+ line_size = ax45mp_priv.ax45mp_cache_line_size;
+
+ start = start & (~(line_size - 1));
+ end = ((end + line_size - 1) & (~(line_size - 1)));
+
+ local_irq_save(flags);
+
+ ax45mp_cpu_dcache_inval_range(start, end);
+
+ local_irq_restore(flags);
+}
+
+static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
+{
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+ unsigned long end = start + size;
+ unsigned long line_size;
+ unsigned long flags;
+
+ line_size = ax45mp_priv.ax45mp_cache_line_size;
+ start = start & (~(line_size - 1));
+ local_irq_save(flags);
+ ax45mp_cpu_dcache_wb_range(start, end);
+ local_irq_restore(flags);
+}
+
+static void ax45mp_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
+{
+ ax45mp_dma_cache_wback(paddr, size);
+ ax45mp_dma_cache_inv(paddr, size);
+}
+
+static int ax45mp_get_l2_line_size(struct device_node *np)
+{
+ int ret;
+
+ ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv.ax45mp_cache_line_size);
+ if (ret) {
+ pr_err("Failed to get cache-line-size, defaulting to 64 bytes\n");
+ return ret;
+ }
+
+ if (ax45mp_priv.ax45mp_cache_line_size != AX45MP_CACHE_LINE_SIZE) {
+ pr_err("Expected cache-line-size to be 64 bytes (found:%u)\n",
+ ax45mp_priv.ax45mp_cache_line_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct riscv_nonstd_cache_ops ax45mp_cmo_ops __initdata = {
+ .wback = &ax45mp_dma_cache_wback,
+ .inv = &ax45mp_dma_cache_inv,
+ .wback_inv = &ax45mp_dma_cache_wback_inv,
+};
+
+static const struct of_device_id ax45mp_cache_ids[] = {
+ { .compatible = "andestech,ax45mp-cache" },
+ { /* sentinel */ }
+};
+
+static int __init ax45mp_cache_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int ret;
+
+ np = of_find_matching_node(NULL, ax45mp_cache_ids);
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
+ /*
+ * If IOCP is present on the Andes AX45MP core, riscv_cbom_block_size
+ * will be 0, so we can rely on it: when riscv_cbom_block_size is 0 there
+ * is no need to handle CMO in software, so just return success here.
+ * Only if it is set do we continue further in the probe path.
+ */
+ if (!riscv_cbom_block_size)
+ return 0;
+
+ ax45mp_priv.l2c_base = ioremap(res.start, resource_size(&res));
+ if (!ax45mp_priv.l2c_base)
+ return -ENOMEM;
+
+ ret = ax45mp_get_l2_line_size(np);
+ if (ret) {
+ iounmap(ax45mp_priv.l2c_base);
+ return ret;
+ }
+
+ riscv_noncoherent_register_cache_ops(&ax45mp_cmo_ops);
+
+ return 0;
+}
+early_initcall(ax45mp_cache_init);
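Note on the range handling above: ax45mp_dma_cache_inv() widens the requested byte range to whole 64-byte cache lines (start rounded down, end rounded up) before issuing the per-line CCTL operations, so a partially covered line at either end of the buffer is still invalidated; the writeback path only rounds the start address down. A minimal sketch of that alignment arithmetic (helper names are illustrative, not part of the driver):

	#define LINE_SZ	64UL

	/* Round an address down to the start of its cache line. */
	static inline unsigned long line_down(unsigned long addr)
	{
		return addr & ~(LINE_SZ - 1);
	}

	/* Round an address up to the next cache-line boundary. */
	static inline unsigned long line_up(unsigned long addr)
	{
		return (addr + LINE_SZ - 1) & ~(LINE_SZ - 1);
	}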
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 514f9f287a78..c6f181702b9a 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -394,8 +394,6 @@ find_quicksilver(struct device *dev, void *data)
static int __init
parisc_agp_init(void)
{
- extern struct sba_device *sba_list;
-
int err = -1;
struct parisc_device *sba = NULL, *lba = NULL;
struct lba_device *lbadev = NULL;
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 23f6f2eda84c..42b1062e33cd 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -33,7 +33,7 @@ const struct class tpm_class = {
.shutdown_pre = tpm_class_shutdown,
};
const struct class tpmrm_class = {
- .name = "tmprm",
+ .name = "tpmrm",
};
dev_t tpm_devt;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 92389a5481ff..a1157c2a7170 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -86,10 +86,10 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
screen_info.o efi-stub-entry.o
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o smbios.o
+lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
-lib-$(CONFIG_RISCV) += riscv.o riscv-stub.o
+lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 8c40fc89f5f9..452b7ccd330e 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -14,42 +14,6 @@
#include "efistub.h"
-/*
- * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
- * to provide space, and fail to zero it). Check for this condition by double
- * checking that the first and the last byte of the image are covered by the
- * same EFI memory map entry.
- */
-static bool check_image_region(u64 base, u64 size)
-{
- struct efi_boot_memmap *map;
- efi_status_t status;
- bool ret = false;
- int map_offset;
-
- status = efi_get_memory_map(&map, false);
- if (status != EFI_SUCCESS)
- return false;
-
- for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
- efi_memory_desc_t *md = (void *)map->map + map_offset;
- u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
-
- /*
- * Find the region that covers base, and return whether
- * it covers base+size bytes.
- */
- if (base >= md->phys_addr && base < end) {
- ret = (base + size) <= end;
- break;
- }
- }
-
- efi_bs_call(free_pool, map);
-
- return ret;
-}
-
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
@@ -59,31 +23,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
{
efi_status_t status;
unsigned long kernel_size, kernel_codesize, kernel_memsize;
- u32 phys_seed = 0;
- u64 min_kimg_align = efi_get_kimg_min_align();
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID;
- void *p;
-
- if (efi_nokaslr) {
- efi_info("KASLR disabled on kernel command line\n");
- } else if (efi_bs_call(handle_protocol, image_handle,
- &li_fixed_proto, &p) == EFI_SUCCESS) {
- efi_info("Image placement fixed by loader\n");
- } else {
- status = efi_get_random_bytes(sizeof(phys_seed),
- (u8 *)&phys_seed);
- if (status == EFI_NOT_FOUND) {
- efi_info("EFI_RNG_PROTOCOL unavailable\n");
- efi_nokaslr = true;
- } else if (status != EFI_SUCCESS) {
- efi_err("efi_get_random_bytes() failed (0x%lx)\n",
- status);
- efi_nokaslr = true;
- }
- }
- }
if (image->image_base != _text) {
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
@@ -98,50 +37,15 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
kernel_codesize = __inittext_end - _text;
kernel_memsize = kernel_size + (_end - _edata);
*reserve_size = kernel_memsize;
+ *image_addr = (unsigned long)_text;
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
- /*
- * If KASLR is enabled, and we have some randomness available,
- * locate the kernel at a randomized offset in physical memory.
- */
- status = efi_random_alloc(*reserve_size, min_kimg_align,
- reserve_addr, phys_seed,
- EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
- if (status != EFI_SUCCESS)
- efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
- } else {
- status = EFI_OUT_OF_RESOURCES;
- }
-
- if (status != EFI_SUCCESS) {
- if (!check_image_region((u64)_text, kernel_memsize)) {
- efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
- } else if (IS_ALIGNED((u64)_text, min_kimg_align) &&
- (u64)_end < EFI_ALLOC_LIMIT) {
- /*
- * Just execute from wherever we were loaded by the
- * UEFI PE/COFF loader if the placement is suitable.
- */
- *image_addr = (u64)_text;
- *reserve_size = 0;
- return EFI_SUCCESS;
- }
-
- status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
- ULONG_MAX, min_kimg_align,
- EFI_LOADER_CODE);
-
- if (status != EFI_SUCCESS) {
- efi_err("Failed to relocate kernel\n");
- *reserve_size = 0;
- return status;
- }
- }
-
- *image_addr = *reserve_addr;
- memcpy((void *)*image_addr, _text, kernel_size);
- caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);
- efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
+ status = efi_kaslr_relocate_kernel(image_addr,
+ reserve_addr, reserve_size,
+ kernel_size, kernel_codesize,
+ kernel_memsize,
+ efi_kaslr_get_phys_seed(image_handle));
+ if (status != EFI_SUCCESS)
+ return status;
return EFI_SUCCESS;
}
@@ -159,3 +63,8 @@ unsigned long primary_entry_offset(void)
*/
return (char *)primary_entry - _text;
}
+
+void efi_icache_sync(unsigned long start, unsigned long end)
+{
+ caches_clean_inval_pou(start, end);
+}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 9823f6fb3e01..212687c30d79 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1133,6 +1133,14 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
void efi_remap_image(unsigned long image_base, unsigned alloc_size,
unsigned long code_size);
+efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long kernel_size,
+ unsigned long kernel_codesize,
+ unsigned long kernel_memsize,
+ u32 phys_seed);
+u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle);
asmlinkage efi_status_t __efiapi
efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab);
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
new file mode 100644
index 000000000000..62d63f7a2645
--- /dev/null
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures to deal with physical address space randomization.
+ */
+#include <linux/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_kaslr_get_phys_seed() - Get random seed for physical kernel KASLR
+ * @image_handle: Handle to the image
+ *
+ * If KASLR is not disabled, obtain a random seed using EFI_RNG_PROTOCOL
+ * that will be used to move the kernel physical mapping.
+ *
+ * Return: the random seed, or 0 if KASLR is disabled or no seed could be obtained
+ */
+u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
+{
+ efi_status_t status;
+ u32 phys_seed;
+ efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID;
+ void *p;
+
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return 0;
+
+ if (efi_nokaslr) {
+ efi_info("KASLR disabled on kernel command line\n");
+ } else if (efi_bs_call(handle_protocol, image_handle,
+ &li_fixed_proto, &p) == EFI_SUCCESS) {
+ efi_info("Image placement fixed by loader\n");
+ } else {
+ status = efi_get_random_bytes(sizeof(phys_seed),
+ (u8 *)&phys_seed);
+ if (status == EFI_SUCCESS) {
+ return phys_seed;
+ } else if (status == EFI_NOT_FOUND) {
+ efi_info("EFI_RNG_PROTOCOL unavailable\n");
+ efi_nokaslr = true;
+ } else if (status != EFI_SUCCESS) {
+ efi_err("efi_get_random_bytes() failed (0x%lx)\n",
+ status);
+ efi_nokaslr = true;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
+ * to provide space, and fail to zero it). Check for this condition by double
+ * checking that the first and the last byte of the image are covered by the
+ * same EFI memory map entry.
+ */
+static bool check_image_region(u64 base, u64 size)
+{
+ struct efi_boot_memmap *map;
+ efi_status_t status;
+ bool ret = false;
+ int map_offset;
+
+ status = efi_get_memory_map(&map, false);
+ if (status != EFI_SUCCESS)
+ return false;
+
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
+ u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
+
+ /*
+ * Find the region that covers base, and return whether
+ * it covers base+size bytes.
+ */
+ if (base >= md->phys_addr && base < end) {
+ ret = (base + size) <= end;
+ break;
+ }
+ }
+
+ efi_bs_call(free_pool, map);
+
+ return ret;
+}
+
+/**
+ * efi_kaslr_relocate_kernel() - Relocate the kernel (random if KASLR enabled)
+ * @image_addr: Pointer to the current kernel location
+ * @reserve_addr: Pointer to the relocated kernel location
+ * @reserve_size: Size of the relocated kernel
+ * @kernel_size: Size of the text + data
+ * @kernel_codesize: Size of the text
+ * @kernel_memsize: Size of the text + data + bss
+ * @phys_seed: Random seed used for the relocation
+ *
+ * If KASLR is not enabled, this function relocates the kernel to a fixed
+ * address (or leaves it at its current location). If KASLR is enabled, the
+ * kernel's physical location is randomized using the seed passed in as a
+ * parameter.
+ *
+ * Return: status code, EFI_SUCCESS if relocation is successful
+ */
+efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long kernel_size,
+ unsigned long kernel_codesize,
+ unsigned long kernel_memsize,
+ u32 phys_seed)
+{
+ efi_status_t status;
+ u64 min_kimg_align = efi_get_kimg_min_align();
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
+ /*
+ * If KASLR is enabled, and we have some randomness available,
+ * locate the kernel at a randomized offset in physical memory.
+ */
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
+ reserve_addr, phys_seed,
+ EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS)
+ efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
+ } else {
+ status = EFI_OUT_OF_RESOURCES;
+ }
+
+ if (status != EFI_SUCCESS) {
+ if (!check_image_region(*image_addr, kernel_memsize)) {
+ efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+ } else if (IS_ALIGNED(*image_addr, min_kimg_align) &&
+ (unsigned long)_end < EFI_ALLOC_LIMIT) {
+ /*
+ * Just execute from wherever we were loaded by the
+ * UEFI PE/COFF loader if the placement is suitable.
+ */
+ *reserve_size = 0;
+ return EFI_SUCCESS;
+ }
+
+ status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
+ ULONG_MAX, min_kimg_align,
+ EFI_LOADER_CODE);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to relocate kernel\n");
+ *reserve_size = 0;
+ return status;
+ }
+ }
+
+ memcpy((void *)*reserve_addr, (void *)*image_addr, kernel_size);
+ *image_addr = *reserve_addr;
+ efi_icache_sync(*image_addr, *image_addr + kernel_codesize);
+ efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
+
+ return status;
+}
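With these helpers in place, an architecture's handle_kernel_image() only needs to compute its image sizes and delegate placement; this is the pattern the arm64 and riscv stubs switch to in the hunks above and below. A condensed sketch of the expected call sequence (symbols as used by the arm64 stub; treat them as placeholders for other architectures):

	*image_addr = (unsigned long)_text;	/* where the loader placed the image */
	status = efi_kaslr_relocate_kernel(image_addr, reserve_addr, reserve_size,
					   kernel_size, kernel_codesize, kernel_memsize,
					   efi_kaslr_get_phys_seed(image_handle));
	if (status != EFI_SUCCESS)
		return status;

A phys_seed of 0 (KASLR disabled, placement fixed by the loader, or no RNG available) makes efi_kaslr_relocate_kernel() fall back to executing in place when the current placement is suitably aligned, or to a plain aligned allocation otherwise.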
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 145c9f0ba217..c96d6dcee86c 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -30,32 +30,29 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
efi_loaded_image_t *image,
efi_handle_t image_handle)
{
- unsigned long kernel_size = 0;
- unsigned long preferred_addr;
+ unsigned long kernel_size, kernel_codesize, kernel_memsize;
efi_status_t status;
kernel_size = _edata - _start;
+ kernel_codesize = __init_text_end - _start;
+ kernel_memsize = kernel_size + (_end - _edata);
*image_addr = (unsigned long)_start;
- *image_size = kernel_size + (_end - _edata);
-
- /*
- * RISC-V kernel maps PAGE_OFFSET virtual address to the same physical
- * address where kernel is booted. That's why kernel should boot from
- * as low as possible to avoid wastage of memory. Currently, dram_base
- * is occupied by the firmware. So the preferred address for kernel to
- * boot is next aligned address. If preferred address is not available,
- * relocate_kernel will fall back to efi_low_alloc_above to allocate
- * lowest possible memory region as long as the address and size meets
- * the alignment constraints.
- */
- preferred_addr = EFI_KIMG_PREFERRED_ADDRESS;
- status = efi_relocate_kernel(image_addr, kernel_size, *image_size,
- preferred_addr, efi_get_kimg_min_align(),
- 0x0);
+ *image_size = kernel_memsize;
+ *reserve_size = *image_size;
+ status = efi_kaslr_relocate_kernel(image_addr,
+ reserve_addr, reserve_size,
+ kernel_size, kernel_codesize, kernel_memsize,
+ efi_kaslr_get_phys_seed(image_handle));
if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n");
*image_size = 0;
}
+
return status;
}
+
+void efi_icache_sync(unsigned long start, unsigned long end)
+{
+ asm volatile ("fence.i" ::: "memory");
+}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 0a7264aabe48..324e942c0650 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -575,6 +575,26 @@ static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
+static int zynq_gpio_irq_reqres(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(chip->parent);
+ if (ret < 0)
+ return ret;
+
+ return gpiochip_reqres_irq(chip, d->hwirq);
+}
+
+static void zynq_gpio_irq_relres(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ gpiochip_relres_irq(chip, d->hwirq);
+ pm_runtime_put(chip->parent);
+}
+
/* irq chip descriptor */
static const struct irq_chip zynq_gpio_level_irqchip = {
.name = DRIVER_NAME,
@@ -584,9 +604,10 @@ static const struct irq_chip zynq_gpio_level_irqchip = {
.irq_unmask = zynq_gpio_irq_unmask,
.irq_set_type = zynq_gpio_set_irq_type,
.irq_set_wake = zynq_gpio_set_wake,
+ .irq_request_resources = zynq_gpio_irq_reqres,
+ .irq_release_resources = zynq_gpio_irq_relres,
.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static const struct irq_chip zynq_gpio_edge_irqchip = {
@@ -597,8 +618,9 @@ static const struct irq_chip zynq_gpio_edge_irqchip = {
.irq_unmask = zynq_gpio_irq_unmask,
.irq_set_type = zynq_gpio_set_irq_type,
.irq_set_wake = zynq_gpio_set_wake,
+ .irq_request_resources = zynq_gpio_irq_reqres,
+ .irq_release_resources = zynq_gpio_irq_relres,
.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
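The two callbacks added above replace GPIOCHIP_IRQ_RESOURCE_HELPERS so that requesting a GPIO interrupt also takes a runtime-PM reference on the controller's parent device, and releasing the interrupt drops that reference again. The general shape of such runtime-PM-aware resource callbacks is sketched below (illustrative names only; this sketch also drops the PM reference if gpiochip_reqres_irq() fails):

	static int foo_gpio_irq_reqres(struct irq_data *d)
	{
		struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
		int ret;

		ret = pm_runtime_resume_and_get(chip->parent);	/* keep the block powered */
		if (ret < 0)
			return ret;

		ret = gpiochip_reqres_irq(chip, d->hwirq);
		if (ret)
			pm_runtime_put(chip->parent);		/* balance on failure */
		return ret;
	}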
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index df633e9ce920..cdf6087706aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -442,9 +442,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
mem_info->local_mem_size_public,
mem_info->local_mem_size_private);
- if (amdgpu_sriov_vf(adev))
- mem_info->mem_clk_max = adev->clock.default_mclk / 100;
- else if (adev->pm.dpm_enabled) {
+ if (adev->pm.dpm_enabled) {
if (amdgpu_emu_mode == 1)
mem_info->mem_clk_max = 0;
else
@@ -463,9 +461,7 @@ uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
/* the sclk is in quanta of 10kHz */
- if (amdgpu_sriov_vf(adev))
- return adev->clock.default_sclk / 100;
- else if (adev->pm.dpm_enabled)
+ if (adev->pm.dpm_enabled)
return amdgpu_dpm_get_sclk(adev, false) / 100;
else
return 100;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 835980e94b9e..fb2681dd6b33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -217,6 +217,7 @@ union umc_info {
struct atom_umc_info_v3_1 v31;
struct atom_umc_info_v3_2 v32;
struct atom_umc_info_v3_3 v33;
+ struct atom_umc_info_v4_0 v40;
};
union vram_info {
@@ -508,9 +509,8 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size, &frev, &crev, &data_offset)) {
+ umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
if (frev == 3) {
- umc_info = (union umc_info *)
- (mode_info->atom_context->bios + data_offset);
switch (crev) {
case 1:
umc_config = le32_to_cpu(umc_info->v31.umc_config);
@@ -533,6 +533,20 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
/* unsupported crev */
return false;
}
+ } else if (frev == 4) {
+ switch (crev) {
+ case 0:
+ umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
+ ecc_default_enabled =
+ (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
+ break;
+ default:
+ /* unsupported crev */
+ return false;
+ }
+ } else {
+ /* unsupported frev */
+ return false;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 49dd9aa8da70..efdb1c48f431 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,7 +127,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
{
struct drm_gem_object *gobj;
unsigned long size;
- int r;
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
@@ -137,23 +136,14 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
drm_gem_object_put(gobj);
size = amdgpu_bo_size(p->uf_bo);
- if (size != PAGE_SIZE || (data->offset + 8) > size) {
- r = -EINVAL;
- goto error_unref;
- }
+ if (size != PAGE_SIZE || data->offset > (size - 8))
+ return -EINVAL;
- if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm)) {
- r = -EINVAL;
- goto error_unref;
- }
+ if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
+ return -EINVAL;
*offset = data->offset;
-
return 0;
-
-error_unref:
- amdgpu_bo_unref(&p->uf_bo);
- return r;
}
static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e77f048c99d8..3f001a50b34a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -885,13 +885,20 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
*/
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
+ int ret;
+
amdgpu_asic_pre_asic_init(adev);
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
- adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
- return amdgpu_atomfirmware_asic_init(adev, true);
- else
+ adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
+ amdgpu_psp_wait_for_bootloader(adev);
+ ret = amdgpu_atomfirmware_asic_init(adev, true);
+ return ret;
+ } else {
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ }
+
+ return 0;
}
/**
@@ -4694,9 +4701,12 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
}
if (ret)
- dev_err(adev->dev, "GPU mode1 reset failed\n");
+ goto mode1_reset_failed;
amdgpu_device_load_pci_state(adev->pdev);
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto mode1_reset_failed;
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -4707,7 +4717,17 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
udelay(1);
}
+ if (i >= adev->usec_timeout) {
+ ret = -ETIMEDOUT;
+ goto mode1_reset_failed;
+ }
+
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+ return 0;
+
+mode1_reset_failed:
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
return ret;
}
@@ -4849,7 +4869,7 @@ static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
ktime_get_ts64(&adev->reset_time);
- dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+ dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_NOWAIT,
amdgpu_devcoredump_read, amdgpu_devcoredump_free);
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 74ffe6581c85..7d5e7ad28ba8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1390,6 +1390,7 @@ union gc_info {
struct gc_info_v1_1 v1_1;
struct gc_info_v1_2 v1_2;
struct gc_info_v2_0 v2;
+ struct gc_info_v2_1 v2_1;
};
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
@@ -1465,6 +1466,15 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+ if (gc_info->v2.header.version_minor == 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
+ adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
+ adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
+ adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
+ }
break;
default:
dev_err(adev->dev,
@@ -1478,6 +1488,7 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
union mall_info {
struct mall_info_v1_0 v1;
+ struct mall_info_v2_0 v2;
};
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
@@ -1518,6 +1529,10 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
adev->gmc.mall_size = mall_size;
adev->gmc.m_half_use = half_use;
break;
+ case 2:
+ mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
+ adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
+ break;
default:
dev_err(adev->dev,
"Unhandled MALL info table %d.%d\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index d20dd3f852fc..363e6a2cad8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,6 +38,8 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -532,11 +534,29 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
return true;
}
+static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips, unsigned int num_clips)
+{
+
+ if (file)
+ return -ENOSYS;
+
+ return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
+ num_clips);
+}
+
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = amdgpu_dirtyfb
+};
+
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1139,7 +1159,11 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_framebuffer_init(dev, &rfb->base,
+ &amdgpu_fb_funcs_atomic);
+ else
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index a4ff515ce896..395c1768b9fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -241,6 +241,9 @@ struct amdgpu_gfx_config {
uint32_t gc_gl1c_per_sa;
uint32_t gc_gl1c_size_per_instance;
uint32_t gc_gl2c_per_gpu;
+ uint32_t gc_tcp_size_per_cu;
+ uint32_t gc_num_cu_per_sqc;
+ uint32_t gc_tcc_size;
};
struct amdgpu_cu_info {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8fdca54bb8a1..429ef212c1f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2078,6 +2078,17 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
}
/* SECUREDISPLAY end */
+int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+ int ret = 0;
+
+ if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
+ ret = psp->funcs->wait_for_bootloader(psp);
+
+ return ret;
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 3384eb94fde0..3e67ed63e638 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -109,6 +109,7 @@ enum psp_reg_prog_id {
struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
+ int (*wait_for_bootloader)(struct psp_context *psp);
int (*bootloader_load_kdb)(struct psp_context *psp);
int (*bootloader_load_spl)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp);
@@ -533,4 +534,6 @@ int psp_spatial_partition(struct psp_context *psp, int mode);
int is_psp_fw_valid(struct psp_bin_desc bin);
+int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 7689395e44fd..3c4600e15b86 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -764,7 +764,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
union ta_ras_cmd_input *info;
- int ret = 0;
+ int ret;
if (!con)
return -EINVAL;
@@ -773,7 +773,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
if (enable &&
head->block != AMDGPU_RAS_BLOCK__GFX &&
!amdgpu_ras_is_feature_allowed(adev, head))
- goto out;
+ return 0;
/* Only enable gfx ras feature from host side */
if (head->block == AMDGPU_RAS_BLOCK__GFX &&
@@ -801,16 +801,16 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
enable ? "enable":"disable",
get_ras_block_str(head),
amdgpu_ras_is_poison_mode_supported(adev), ret);
- goto out;
+ return ret;
}
+
+ kfree(info);
}
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
-out:
- if (head->block == AMDGPU_RAS_BLOCK__GFX)
- kfree(info);
- return ret;
+
+ return 0;
}
/* Only used in device probe stage and called only once. */
@@ -2399,6 +2399,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
return true;
default:
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 4764d2171f92..595d5e535aca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -158,9 +158,10 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 7): /* Sienna cichlid */
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 2): /* Aldebaran */
- case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
return true;
+ case IP_VERSION(13, 0, 6):
+ return (adev->gmc.is_app_apu) ? false : true;
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 57ed4e5c294c..0a26a00074a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -203,6 +203,9 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
if (adev->rev_id == 0) {
WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
REDUCE_FIFO_DEPTH_BY_2, 2);
+ } else {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
+ SPARE, 0x1);
}
}
}
@@ -860,11 +863,15 @@ static int gfx_v9_4_3_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_gfx_sysfs_init(adev);
+ r = amdgpu_gfx_ras_sw_init(adev);
if (r)
return r;
- return amdgpu_gfx_ras_sw_init(adev);
+
+ if (!amdgpu_sriov_vf(adev))
+ r = amdgpu_gfx_sysfs_init(adev);
+
+ return r;
}
static int gfx_v9_4_3_sw_fini(void *handle)
@@ -885,7 +892,8 @@ static int gfx_v9_4_3_sw_fini(void *handle)
gfx_v9_4_3_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
gfx_v9_4_3_free_microcode(adev);
- amdgpu_gfx_sysfs_fini(adev);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_gfx_sysfs_fini(adev);
return 0;
}
@@ -2219,15 +2227,6 @@ static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
WREG32_SOC15(GC, GET_INST(GC, xcc_id),
regRLC_CGTT_MGCG_OVERRIDE, data);
- def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL);
-
- if (enable)
- data &= ~RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
- else
- data |= RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
-
- if (def != data)
- WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL, data);
}
static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
@@ -4048,7 +4047,8 @@ static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
uint32_t i;
uint32_t data;
- data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
+ data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
+ data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
if (amdgpu_watchdog_timer.timeout_fatal_disable &&
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 15612915bb6c..1de79d660285 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -360,8 +360,10 @@ static int jpeg_v4_0_3_hw_fini(void *handle)
cancel_delayed_work_sync(&adev->jpeg.idle_work);
- if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 9ea072374cb7..f85eec05d218 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -437,6 +437,24 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
XCC_DOORBELL_FENCE__SHUB_SLV_MODE_MASK);
}
+
+ if (!amdgpu_sriov_vf(adev)) {
+ u32 baco_cntl;
+ for_each_inst(i, adev->aid_mask) {
+ baco_cntl = RREG32_SOC15(NBIO, i, regBIF_BX0_BACO_CNTL);
+ if (baco_cntl & (BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK |
+ BIF_BX0_BACO_CNTL__BACO_EN_MASK)) {
+ baco_cntl &= ~(
+ BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK |
+ BIF_BX0_BACO_CNTL__BACO_EN_MASK);
+ dev_dbg(adev->dev,
+ "Unsetting baco dummy mode %x",
+ baco_cntl);
+ WREG32_SOC15(NBIO, i, regBIF_BX0_BACO_CNTL,
+ baco_cntl);
+ }
+ }
+ }
}
static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 10b17bd5aebe..469eed084976 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -133,12 +133,32 @@ static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
-static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
+static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
+ int retry_loop, ret;
- int ret;
- int retry_loop;
+ for (retry_loop = 0; retry_loop < 70; retry_loop++) {
+ /* Wait for the bootloader to signify that it is
+ ready, having bit 31 of C2PMSG_33 set to 1 */
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33),
+ 0x80000000, 0xffffffff, false);
+
+ if (ret == 0)
+ break;
+ }
+
+ if (ret)
+ dev_warn(adev->dev, "Bootloader wait timed out");
+
+ return ret;
+}
+
+static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int retry_loop, ret;
/* Wait for bootloader to signify that it is ready having bit 31 of
* C2PMSG_35 set to 1. All other bits are expected to be cleared.
@@ -157,6 +177,19 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
return ret;
}
+static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) {
+ psp_v13_0_wait_for_vmbx_ready(psp);
+
+ return psp_v13_0_wait_for_bootloader(psp);
+ }
+
+ return 0;
+}
+
static int psp_v13_0_bootloader_load_component(struct psp_context *psp,
struct psp_bin_desc *bin_desc,
enum psp_bootloader_cmd bl_cmd)
@@ -714,6 +747,7 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
+ .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
.bootloader_load_kdb = psp_v13_0_bootloader_load_kdb,
.bootloader_load_spl = psp_v13_0_bootloader_load_spl,
.bootloader_load_sysdrv = psp_v13_0_bootloader_load_sysdrv,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c45721ca916e..f5be40d7ba36 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -559,8 +559,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
*/
if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
return AMD_RESET_METHOD_MODE2;
+ else if (!(adev->flags & AMD_IS_APU))
+ return AMD_RESET_METHOD_MODE1;
else
- return AMD_RESET_METHOD_NONE;
+ return AMD_RESET_METHOD_MODE2;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index f0731a6a5306..830396b1c3b1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -384,7 +384,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
default:
break;
}
- kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
+ kfd_signal_event_interrupt(pasid, sq_int_data, 24);
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 2319467d2d95..0bbf0edbabd4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -457,6 +457,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -472,6 +473,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = destroy_hiq_mqd;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -501,6 +503,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 011561605983..bb16b795d1bc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1686,6 +1686,8 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) {
pr_debug("failed %d to get svm range pages\n", r);
+ if (r == -EBUSY)
+ r = -EAGAIN;
goto unreserve_out;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 268cb99a4c4b..88ba8b66de1f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -65,6 +65,7 @@
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -4265,6 +4266,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
+ bool replay_feature_enabled = false;
int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4374,6 +4376,20 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ replay_feature_enabled = true;
+ break;
+ default:
+ replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+ break;
+ }
+ }
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@@ -4422,6 +4438,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_dm_update_connector_after_detect(aconnector);
setup_backlight_device(dm, aconnector);
+ /*
+ * Disable PSR if replay can be enabled
+ */
+ if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
+ psr_feature_enabled = false;
+
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
@@ -6004,7 +6026,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (recalculate_timing)
drm_mode_set_crtcinfo(&saved_mode, 0);
- else
+ else if (!old_stream)
drm_mode_set_crtcinfo(&mode, 0);
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 30d4c6fd95f5..97b7a0b8a1c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -29,6 +29,7 @@
#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
@@ -123,7 +124,12 @@ static void vblank_control_worker(struct work_struct *work)
* fill_dc_dirty_rects().
*/
if (vblank_work->stream && vblank_work->stream->link) {
- if (vblank_work->enable) {
+ /*
+ * Prioritize replay over PSR
+ */
+ if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
+ amdgpu_dm_replay_enable(vblank_work->stream, false);
+ else if (vblank_work->enable) {
if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
vblank_work->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(vblank_work->stream);
@@ -132,6 +138,7 @@ static void vblank_control_worker(struct work_struct *work)
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
#endif
+ vblank_work->stream->link->panel_config.psr.disallow_replay &&
vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
amdgpu_dm_psr_enable(vblank_work->stream);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 8eeca160d434..cc74dd69acf2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1269,6 +1269,13 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
attributes.rotation_angle = 0;
attributes.attribute_flags.value = 0;
+ /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
+ * legacy gamma setup.
+ */
+ if (crtc_state->cm_is_degamma_srgb &&
+ adev->dm.dc->caps.color.dpp.gamma_corr)
+ attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
+
attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
if (crtc_state->stream) {
@@ -1468,6 +1475,15 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_blend_mode_property(plane, blend_caps);
}
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ drm_plane_create_zpos_immutable_property(plane, 0);
+ } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ unsigned int zpos = 1 + drm_plane_index(plane);
+ drm_plane_create_zpos_property(plane, zpos, 1, 254);
+ } else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ drm_plane_create_zpos_immutable_property(plane, 255);
+ }
+
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
plane_cap &&
(plane_cap->pixel_format_support.nv12 ||
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 69ffd4424dc7..1b8c2aef4633 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -78,3 +78,4 @@ DC_EDID += dc_edid_parser.o
AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
+
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 3e0da873cf4c..1042cf1a3ab0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -32,6 +32,7 @@
#define MAX_INSTANCE 6
#define MAX_SEGMENT 6
+#define SMU_REGISTER_WRITE_RETRY_COUNT 5
struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
@@ -132,6 +133,8 @@ static int dcn315_smu_send_msg_with_param(
unsigned int msg_id, unsigned int param)
{
uint32_t result;
+ uint32_t i = 0;
+ uint32_t read_back_data;
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
@@ -148,10 +151,19 @@ static int dcn315_smu_send_msg_with_param(
/* Set the parameter register for the SMU message, unit is Mhz */
REG_WRITE(MP1_SMN_C2PMSG_37, param);
- /* Trigger the message transaction by writing the message ID */
- generic_write_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3, msg_id);
+ for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
+ /* Trigger the message transaction by writing the message ID */
+ generic_write_indirect_reg(CTX,
+ REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
+ mmMP1_C2PMSG_3, msg_id);
+ read_back_data = generic_read_indirect_reg(CTX,
+ REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
+ mmMP1_C2PMSG_3);
+ if (read_back_data == msg_id)
+ break;
+ udelay(2);
+ smu_print("SMU msg id write failed %x times.\n", i + 1);
+ }
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
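The retry loop above hardens the message-ID write against the indirect RSMU register write being dropped: the value is read back through the same indirect path and, if it does not match, the write is retried up to SMU_REGISTER_WRITE_RETRY_COUNT times with a short delay. As a generic pattern, a write-verify-retry helper looks roughly like this (sketch with illustrative names, not DC code):

	static bool write_reg_verified(void __iomem *reg, u32 val, int max_tries)
	{
		int i;

		for (i = 0; i < max_tries; i++) {
			writel(val, reg);		/* post the value */
			if (readl(reg) == val)		/* verify it stuck */
				return true;
			udelay(2);			/* give the target time to settle */
		}
		return false;
	}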
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 566d7045b2de..3a9077b60029 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2073,12 +2073,12 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
}
- /* Check for case where we are going from odm 2:1 to max
- * pipe scenario. For these cases, we will call
- * commit_minimal_transition_state() to exit out of odm 2:1
- * first before processing new streams
+ /* ODM Combine 2:1 power optimization is only applied in the single-stream
+ * scenario; it uses more pipes than strictly needed in order to reduce power
+ * consumption. We need to switch off this feature to make room for new streams.
*/
- if (stream_count == dc->res_pool->pipe_count) {
+ if (stream_count > dc->current_state->stream_count &&
+ dc->current_state->stream_count == 1) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->next_odm_pipe)
@@ -3501,6 +3501,45 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
+static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+{
+/*
+ * This function calls HWSS to wait for any potentially double buffered
+ * operations to complete. It should be invoked as a pre-amble prior
+ * to full update programming before asserting any HW locks.
+ */
+ int pipe_idx;
+ int opp_inst;
+ int opp_count = dc->res_pool->pipe_count;
+ struct hubp *hubp;
+ int mpcc_inst;
+ const struct pipe_ctx *pipe_ctx;
+
+ for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+ pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+ pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+
+ hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+
+ mpcc_inst = hubp->inst;
+ // MPCC inst is equal to pipe index in practice
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
+ }
+ }
+ }
+}
+
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -3519,24 +3558,9 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
-
-
dc_z10_restore(dc);
-
- if (update_type == UPDATE_TYPE_FULL) {
- /* wait for all double-buffer activity to clear on all pipes */
- int pipe_idx;
-
- for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
-
- if (!pipe_ctx->stream)
- continue;
-
- if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
- pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
- }
- }
+ if (update_type == UPDATE_TYPE_FULL)
+ wait_for_outstanding_hw_updates(dc, context);
if (update_type == UPDATE_TYPE_FULL) {
dc_allow_idle_optimizations(dc, false);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 65fa9e21ad9c..e72f15ac0048 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1106,29 +1106,6 @@ void dcn20_blank_pixel_data(
v_active,
offset);
- if (!blank && dc->debug.enable_single_display_2to1_odm_policy) {
- /* when exiting dynamic ODM need to reinit DPG state for unused pipes */
- struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe;
-
- odm_pipe = pipe_ctx->next_odm_pipe;
-
- while (old_odm_pipe) {
- if (!odm_pipe || old_odm_pipe->pipe_idx != odm_pipe->pipe_idx)
- dc->hwss.set_disp_pattern_generator(dc,
- old_odm_pipe,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
- COLOR_DEPTH_888,
- NULL,
- 0,
- 0,
- 0);
- old_odm_pipe = old_odm_pipe->next_odm_pipe;
- if (odm_pipe)
- odm_pipe = odm_pipe->next_odm_pipe;
- }
- }
-
if (!blank)
if (stream_res->abm) {
dc->hwss.set_pipe(pipe_ctx);
@@ -1584,17 +1561,6 @@ static void dcn20_update_dchubp_dpp(
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
// MPCC inst is equal to pipe index in practice
- int mpcc_inst = hubp->inst;
- int opp_inst;
- int opp_count = dc->res_pool->pipe_count;
-
- for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
- if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
- dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
- dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
- break;
- }
- }
hws->funcs.update_mpcc(dc, pipe_ctx);
}
@@ -1722,11 +1688,16 @@ static void dcn20_program_pipe(
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
- /* Only need to unblank on top pipe */
- if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
- && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
- hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx,
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
+ }
/* Only update TG on top pipe */
if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 6cef62d7a2e5..255713ec29bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -987,3 +987,20 @@ void dcn30_prepare_bandwidth(struct dc *dc,
}
}
+void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
+{
+ unsigned int i;
+ unsigned int triggers = 0;
+
+ if (params->triggers.surface_update)
+ triggers |= 0x100;
+ if (params->triggers.cursor_update)
+ triggers |= 0x8;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
index a24a8e33a3d2..ce19c54097f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -87,5 +87,7 @@ void dcn30_set_hubp_blank(const struct dc *dc,
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 3d19acaa12f3..0de8b2783cf6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index 257df8660b4c..61205cdbe2d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index fc25cc300a17..1d7bc1e39afe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index ca8fe55c33b8..4ef85c3a0688 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index 777b2fac20c4..c7417147dff1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -65,7 +65,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 935cd23e6a01..f9d601c8c721 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -2564,18 +2564,128 @@ static int find_optimal_free_pipe_as_secondary_dpp_pipe(
return free_pipe_idx;
}
+static struct pipe_ctx *find_idle_secondary_pipe_check_mpo(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe)
+{
+ int i;
+ struct pipe_ctx *secondary_pipe = NULL;
+ struct pipe_ctx *next_odm_mpo_pipe = NULL;
+ int primary_index, preferred_pipe_idx;
+ struct pipe_ctx *old_primary_pipe = NULL;
+
+ /*
+ * Modified from find_idle_secondary_pipe
+ * With windowed MPO and ODM, we want to avoid the case where we want a
+ * free pipe for the left side but the free pipe is being used on the
+ * right side.
+ * Add check on current_state if the primary_pipe is the left side,
+ * to check the right side ( primary_pipe->next_odm_pipe ) to see if
+ * it is using a pipe for MPO ( primary_pipe->next_odm_pipe->bottom_pipe )
+ * - If so, then don't use this pipe
+ * EXCEPTION - 3 plane ( 2 MPO plane ) case
+ * - in this case, the primary pipe has already gotten a free pipe for the
+ * MPO window in the left
+ * - when it tries to get a free pipe for the MPO window on the right,
+ * it will see that it is already assigned to the right side
+ * ( primary_pipe->next_odm_pipe ). But in this case, we want this
+ * free pipe, since it will be for the right side. So add an
+ * additional condition, that skipping the free pipe on the right only
+ * applies if the primary pipe has no bottom pipe currently assigned
+ */
+ if (primary_pipe) {
+ primary_index = primary_pipe->pipe_idx;
+ old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index];
+ if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe)
+ && (!primary_pipe->bottom_pipe))
+ next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe;
+
+ preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
+ if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) &&
+ !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ }
+ }
+
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+ if (!secondary_pipe)
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if ((res_ctx->pipe_ctx[i].stream == NULL) &&
+ !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) {
+ secondary_pipe = &res_ctx->pipe_ctx[i];
+ secondary_pipe->pipe_idx = i;
+ break;
+ }
+ }
+
+ return secondary_pipe;
+}
+
+static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
+ struct dc_state *state,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream,
+ const struct pipe_ctx *head_pipe)
+{
+ struct resource_context *res_ctx = &state->res_ctx;
+ struct pipe_ctx *idle_pipe, *pipe;
+ struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx;
+ int head_index;
+
+ if (!head_pipe)
+ ASSERT(0);
+
+ /*
+ * Modified from dcn20_acquire_idle_pipe_for_layer
+ * Check if head_pipe in old_context already has bottom_pipe allocated.
+ * - If so, check if that pipe is available in the current context.
+ * -- If so, reuse pipe from old_context
+ */
+ head_index = head_pipe->pipe_idx;
+ pipe = &old_ctx->pipe_ctx[head_index];
+ if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) {
+ idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx];
+ idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx;
+ } else {
+ idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, head_pipe);
+ if (!idle_pipe)
+ return NULL;
+ }
+
+ idle_pipe->stream = head_pipe->stream;
+ idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+ idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+
+ idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
+
+ return idle_pipe;
+}
+
struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *opp_head_pipe)
{
- int free_pipe_idx =
- find_optimal_free_pipe_as_secondary_dpp_pipe(
- &cur_ctx->res_ctx, &new_ctx->res_ctx,
- pool, opp_head_pipe);
+
+ int free_pipe_idx;
struct pipe_ctx *free_pipe;
+ if (!opp_head_pipe->stream->ctx->dc->config.enable_windowed_mpo_odm)
+ return dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
+ new_ctx, pool, opp_head_pipe->stream, opp_head_pipe);
+
+ free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx,
+ pool, opp_head_pipe);
if (free_pipe_idx >= 0) {
free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx];
free_pipe->pipe_idx = free_pipe_idx;
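The long comment in find_idle_secondary_pipe_check_mpo above is easier to follow with concrete indices. Below is a minimal standalone sketch, with a hypothetical 4-pipe pool (the pipe count and indices are made up for illustration and are not part of the patch), of the mirrored-index preference and the right-side-MPO skip:

#include <stdio.h>

#define PIPE_COUNT 4   /* hypothetical pool size, for illustration only */

int main(void)
{
	int primary_idx = 1;       /* head pipe driving the left ODM half */
	int right_mpo_pipe = 2;    /* assume next_odm_pipe->bottom_pipe already uses pipe 2 */
	int preferred = (PIPE_COUNT - 1) - primary_idx;   /* mirrored preference: pipe 2 */

	if (preferred == right_mpo_pipe)
		printf("pipe %d already serves the right-side MPO plane, search backwards instead\n",
		       preferred);
	else
		printf("acquire pipe %d as the secondary DPP pipe\n", preferred);
	return 0;
}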
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 8afda5ecc0cd..5805fb02af14 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1099,6 +1099,11 @@ void dcn20_calculate_dlg_params(struct dc *dc,
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+ if (dc->ctx->dce_version < DCN_VERSION_3_1 &&
+ context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+ dcn20_adjust_freesync_v_startup(
+ &context->res_ctx.pipe_ctx[i].stream->timing,
+ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
pipe_idx++;
}
@@ -1927,7 +1932,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- int i = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1951,15 +1955,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
- dcn20_adjust_freesync_v_startup(
- &context->res_ctx.pipe_ctx[i].stream->timing,
- &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
- }
-
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
@@ -2232,7 +2227,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- int i = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2261,15 +2255,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
- dcn20_adjust_freesync_v_startup(
- &context->res_ctx.pipe_ctx[i].stream->timing,
- &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
- }
-
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 07adb614366e..fb21572750e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -293,6 +293,17 @@ static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_
return num_lines;
}
+static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
+{
+ unsigned int v_active = 0, v_blank = 0, v_back_porch = 0;
+
+ v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom;
+ v_blank = timing->v_total - v_active;
+ v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width;
+
+ return v_back_porch;
+}
+
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
@@ -310,6 +321,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
unsigned int num_lines = 0;
+ unsigned int v_back_porch = 0;
if (!res_ctx->pipe_ctx[i].stream)
continue;
@@ -323,9 +335,16 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
else
pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+ v_back_porch = get_vertical_back_porch(timing);
+
pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
- pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
+	// vblank_nom should not be smaller than (VSync (timing->v_sync_width) + v_back_porch + 2)
+	// The + 2 is because:
+	// 1 -> VStartup_start should be 1 line before VSync
+	// 1 -> always reserve 1 line between the start of vblank and the vstartup signal
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2);
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
if (pipe->plane_state &&
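To make the new lower bound on vblank_nom concrete, here is a small standalone sketch using a hypothetical 1920x1080 timing (v_total 1125, v_front_porch 4, v_sync_width 5, no borders); the numbers are illustrative and not taken from the patch:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 1080p-like timing; values chosen only for illustration. */
	unsigned int v_total = 1125, v_addressable = 1080;
	unsigned int v_border_top = 0, v_border_bottom = 0;
	unsigned int v_front_porch = 4, v_sync_width = 5;

	unsigned int v_active = v_border_top + v_addressable + v_border_bottom;   /* 1080 */
	unsigned int v_blank = v_total - v_active;                                /* 45 */
	unsigned int v_back_porch = v_blank - v_front_porch - v_sync_width;       /* 36 */
	unsigned int min_vblank_nom = v_sync_width + v_back_porch + 2;            /* 43 */

	printf("vblank_nom must be at least %u lines\n", min_vblank_nom);
	return 0;
}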
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index dbd60811f95d..ef3a67409021 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -338,7 +338,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
* - Delta for CEIL: delta_from_mid_point_in_us_1
* - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
- if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
+ if (mid_point_frames_ceil &&
+ (last_render_time_in_us / mid_point_frames_ceil) <
+ in_out_vrr->min_duration_in_us) {
/* Check for out of range.
* If using CEIL produces a value that is out of range,
* then we are forced to use FLOOR.
@@ -385,8 +387,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
*/
- if (last_render_time_in_us / frames_to_insert <
- in_out_vrr->min_duration_in_us){
+ if (frames_to_insert &&
+ (last_render_time_in_us / frames_to_insert) <
+ in_out_vrr->min_duration_in_us){
frames_to_insert -= (frames_to_insert > 1) ?
1 : 0;
}
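Both hunks above add the same pattern: check the divisor before dividing. A tiny standalone sketch of the guarded check with made-up numbers (the render time, the minimum duration, and the zero frame count are illustrative assumptions, not values from the patch):

#include <stdio.h>

int main(void)
{
	/* Hypothetical values for illustration only. */
	unsigned int last_render_time_in_us = 30000;
	unsigned int min_duration_in_us = 8333;      /* e.g. an assumed 120 Hz upper bound */
	unsigned int mid_point_frames_ceil = 0;      /* the degenerate case the guard handles */

	/* Mirror of the guarded pattern: divide only when the frame count is non-zero. */
	if (mid_point_frames_ceil &&
	    (last_render_time_in_us / mid_point_frames_ceil) < min_duration_in_us)
		printf("CEIL result is out of range, fall back to FLOOR\n");
	else
		printf("frame count is zero or CEIL is in range; no out-of-range handling needed\n");
	return 0;
}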
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index abe829bbd54a..67d7b7ee8a2a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -240,6 +240,7 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+ DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
enum DC_DEBUG_MASK {
@@ -250,6 +251,7 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PSR = 0x10,
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
DC_DISABLE_MPO = 0x40,
+ DC_DISABLE_REPLAY = 0x50,
DC_ENABLE_DPIA_TRACE = 0x80,
};
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index e68c1e280322..fa7d6ced786f 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -3117,6 +3117,24 @@ enum atom_umc_config1_def {
UMC_CONFIG1__ENABLE_ECC_CAPABLE = 0x00010000,
};
+struct atom_umc_info_v4_0 {
+ struct atom_common_table_header table_header;
+ uint32_t ucode_reserved[5];
+ uint8_t umcip_min_ver;
+ uint8_t umcip_max_ver;
+ uint8_t vram_type;
+ uint8_t umc_config;
+ uint32_t mem_refclk_10khz;
+ uint32_t clk_reserved[4];
+ uint32_t golden_reserved;
+ uint32_t umc_config1;
+ uint32_t reserved[2];
+ uint8_t channel_num;
+ uint8_t channel_width;
+ uint8_t channel_reserve[2];
+ uint8_t umc_info_reserved[16];
+};
+
/*
***************************************************************************
Data Table vram_info structure
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index f43e29722ef7..7a9d473d0917 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -30,7 +30,7 @@
#define GC_TABLE_ID 0x4347
#define HARVEST_TABLE_SIGNATURE 0x56524148
#define VCN_INFO_TABLE_ID 0x004E4356
-#define MALL_INFO_TABLE_ID 0x4D414C4C
+#define MALL_INFO_TABLE_ID 0x4C4C414D
typedef enum
{
@@ -280,6 +280,36 @@ struct gc_info_v2_0 {
uint32_t gc_num_packer_per_sc;
};
+struct gc_info_v2_1 {
+ struct gpu_info_header header;
+
+ uint32_t gc_num_se;
+ uint32_t gc_num_cu_per_sh;
+ uint32_t gc_num_sh_per_se;
+ uint32_t gc_num_rb_per_se;
+ uint32_t gc_num_tccs;
+ uint32_t gc_num_gprs;
+ uint32_t gc_num_max_gs_thds;
+ uint32_t gc_gs_table_depth;
+ uint32_t gc_gsprim_buff_depth;
+ uint32_t gc_parameter_cache_depth;
+ uint32_t gc_double_offchip_lds_buffer;
+ uint32_t gc_wave_size;
+ uint32_t gc_max_waves_per_simd;
+ uint32_t gc_max_scratch_slots_per_cu;
+ uint32_t gc_lds_size;
+ uint32_t gc_num_sc_per_se;
+ uint32_t gc_num_packer_per_sc;
+ /* new for v2_1 */
+ uint32_t gc_num_tcp_per_sh;
+ uint32_t gc_tcp_size_per_cu;
+ uint32_t gc_num_sdp_interface;
+ uint32_t gc_num_cu_per_sqc;
+ uint32_t gc_instruction_cache_size_per_sqc;
+ uint32_t gc_scalar_data_cache_size_per_sqc;
+ uint32_t gc_tcc_size;
+};
+
typedef struct harvest_info_header {
uint32_t signature; /* Table Signature */
uint32_t version; /* Table Version */
@@ -312,6 +342,12 @@ struct mall_info_v1_0 {
uint32_t reserved[5];
};
+struct mall_info_v2_0 {
+ struct mall_info_header header;
+ uint32_t mall_size_per_umc;
+ uint32_t reserved[8];
+};
+
#define VCN_INFO_TABLE_MAX_NUM_INSTANCES 4
struct vcn_info_header {
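One note on the MALL_INFO_TABLE_ID change in this file: the neighbouring IDs (HARVEST_TABLE_SIGNATURE, VCN_INFO_TABLE_ID) encode their ASCII names as a little-endian 32-bit word, with the first character in the least-significant byte, and the new MALL value follows the same convention while the old one had the bytes in the opposite order. A small standalone sketch of that mapping (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Build a 32-bit table ID the way a little-endian read of the 4 ASCII bytes would. */
static uint32_t table_id(const char s[4])
{
	return (uint32_t)(unsigned char)s[0] |
	       ((uint32_t)(unsigned char)s[1] << 8) |
	       ((uint32_t)(unsigned char)s[2] << 16) |
	       ((uint32_t)(unsigned char)s[3] << 24);
}

int main(void)
{
	printf("HARV -> 0x%08X\n", table_id("HARV"));     /* 0x56524148 */
	printf("MALL -> 0x%08X\n", table_id("MALL"));     /* 0x4C4C414D, the new value */
	return 0;
}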
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5b1d73b00ef7..41147da54458 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3311,8 +3311,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
(gc_ver != IP_VERSION(9, 4, 3)) &&
(attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
+ attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
return 0;
/* hotspot temperature for gc 9,4,3*/
@@ -3324,9 +3326,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only SOC15 dGPUs support hotspot and mem temperatures */
if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) ||
(gc_ver == IP_VERSION(9, 4, 3))) &&
- (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
+ (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
@@ -3471,6 +3471,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
size = sizeof(uint32_t);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
+ size = sizeof(uint32_t);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
+ seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff);
size = sizeof(value);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 95eb8a5eb54f..5a52098bcf16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1031,10 +1031,7 @@ struct pptable_funcs {
enum smu_feature_mask mask);
/**
- * @notify_display_change: Enable fast memory clock switching.
- *
- * Allows for fine grained memory clock switching but has more stringent
- * timing requirements.
+	 * @notify_display_change: General interface call to let the SMU know about a DC change
*/
int (*notify_display_change)(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
index 10cff75b44d5..e2ee855c7748 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
@@ -138,7 +138,10 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
-#define PPSMC_Message_Count 0x4D
+
+#define PPSMC_MSG_DALNotPresent 0x4E
+
+#define PPSMC_Message_Count 0x4F
//Debug Dump Message
#define DEBUGSMC_MSG_TestMessage 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 252aef190c5c..9be4051c0865 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x5
+#define SMU_METRICS_TABLE_VERSION 0x7
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -198,7 +198,7 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t SocketThmResidencyAcc;
uint32_t VrThmResidencyAcc;
uint32_t HbmThmResidencyAcc;
- uint32_t spare;
+ uint32_t GfxLockXCDMak;
// New Items at end to maintain driver compatibility
uint32_t GfxclkFrequency[8];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index ae4f44c4b877..70a4a717fd3f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -83,13 +83,27 @@
#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
#define PPSMC_MSG_PrepareForDriverUnload 0x34
-#define PPSMC_Message_Count 0x35
+#define PPSMC_MSG_ReadThrottlerLimit 0x35
+#define PPSMC_MSG_QueryValidMcaCount 0x36
+#define PPSMC_MSG_McaBankDumpDW 0x37
+#define PPSMC_MSG_GetCTFLimit 0x38
+#define PPSMC_Message_Count 0x39
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+//PPSMC Throttling Limit Types for driver msg argument
+#define PPSMC_THROTTLING_LIMIT_TYPE_SOCKET 0x1
+#define PPSMC_THROTTLING_LIMIT_TYPE_HBM 0x2
+
+//CTF/Throttle Limit types
+#define PPSMC_AID_THM_TYPE 0x1
+#define PPSMC_CCD_THM_TYPE 0x2
+#define PPSMC_XCD_THM_TYPE 0x3
+#define PPSMC_HBM_THM_TYPE 0x4
+
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_MSG;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 297b70b9388f..e57265cf637c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -84,6 +84,7 @@
__SMU_DUMMY_MAP(SetTjMax), \
__SMU_DUMMY_MAP(SetFanTemperatureTarget), \
__SMU_DUMMY_MAP(PrepareMp1ForUnload), \
+ __SMU_DUMMY_MAP(GetCTFLimit), \
__SMU_DUMMY_MAP(DramLogSetDramAddrHigh), \
__SMU_DUMMY_MAP(DramLogSetDramAddrLow), \
__SMU_DUMMY_MAP(DramLogSetDramSize), \
@@ -245,7 +246,8 @@
__SMU_DUMMY_MAP(AllowGpo), \
__SMU_DUMMY_MAP(Mode2Reset), \
__SMU_DUMMY_MAP(RequestI2cTransaction), \
- __SMU_DUMMY_MAP(GetMetricsTable),
+ __SMU_DUMMY_MAP(GetMetricsTable), \
+ __SMU_DUMMY_MAP(DALNotPresent),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index f1282fc4b90a..0232adb95df3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -837,12 +837,8 @@ int smu_v13_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
- if (!smu->pm_enabled)
- return ret;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
- smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+ if (!amdgpu_device_has_dc_support(smu->adev))
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DALNotPresent, NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 8b7403ba89d7..3903a47669e4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -162,6 +162,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
+ MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -2687,6 +2688,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
.gpo_control = smu_v13_0_gpo_control,
.get_ecc_info = smu_v13_0_0_get_ecc_info,
+ .notify_display_change = smu_v13_0_notify_display_change,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 6ed9cd0a1e4e..199a673b8120 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -132,6 +132,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
+ MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
};
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
@@ -2081,6 +2082,55 @@ out:
return ret;
}
+static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ struct amdgpu_device *adev = smu->adev;
+ u32 aid_temp, xcd_temp, mem_temp;
+ uint32_t smu_version;
+ u32 ccd_temp = 0;
+ int ret;
+
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
+ if (!range)
+ return -EINVAL;
+
+	/* Check the SMU version; the GetCTFLimit message is only supported for SMU version 85.69 or higher */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (smu_version < 0x554500)
+ return 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_AID_THM_TYPE, &aid_temp);
+ if (ret)
+ goto failed;
+
+ if (adev->flags & AMD_IS_APU) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_CCD_THM_TYPE, &ccd_temp);
+ if (ret)
+ goto failed;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_XCD_THM_TYPE, &xcd_temp);
+ if (ret)
+ goto failed;
+
+ range->hotspot_crit_max = max3(aid_temp, xcd_temp, ccd_temp) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_HBM_THM_TYPE, &mem_temp);
+ if (ret)
+ goto failed;
+
+ range->mem_crit_max = mem_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+failed:
+ return ret;
+}
+
static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2108,8 +2158,7 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
- /* TODO: Enable this when FW support is added */
- return false;
+ return true;
}
static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
@@ -2177,6 +2226,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
+ .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
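For readers following smu_v13_0_6_get_thermal_temperature_range above: the critical hotspot limit is simply the largest of the per-type CTF limits scaled to millidegrees. A short standalone sketch with hypothetical limits (the temperatures and the assumption that SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 are illustrative):

#include <stdio.h>

#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000   /* assumed millidegree scale */

static unsigned int max3(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a > b ? a : b;
	return m > c ? m : c;
}

int main(void)
{
	/* Hypothetical CTF limits in degrees C; ccd stays 0 on non-APU parts. */
	unsigned int aid_temp = 105, xcd_temp = 110, ccd_temp = 0;

	unsigned int hotspot_crit_max = max3(aid_temp, xcd_temp, ccd_temp) *
					SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;

	printf("hotspot_crit_max = %u millidegrees C\n", hotspot_crit_max);   /* 110000 */
	return 0;
}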
diff --git a/drivers/gpu/drm/ci/arm.config b/drivers/gpu/drm/ci/arm.config
new file mode 100644
index 000000000000..871f4de063ad
--- /dev/null
+++ b/drivers/gpu/drm/ci/arm.config
@@ -0,0 +1,69 @@
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_DEBUG_KERNEL=y
+
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM=y
+CONFIG_ZSMALLOC_STAT=y
+
+# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
+CONFIG_BLK_DEV_INITRD=n
+
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+
+CONFIG_DRM=y
+CONFIG_DRM_ETNAVIV=y
+CONFIG_DRM_ROCKCHIP=y
+CONFIG_DRM_PANFROST=y
+CONFIG_DRM_LIMA=y
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_PWM_CROS_EC=y
+CONFIG_BACKLIGHT_PWM=y
+
+CONFIG_ROCKCHIP_CDN_DP=n
+
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PHY_ROCKCHIP_DP=y
+CONFIG_DWMAC_ROCKCHIP=y
+
+CONFIG_MFD_RK808=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_RTC_DRV_RK808=y
+CONFIG_COMMON_CLK_RK808=y
+
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR=y
+
+CONFIG_REGULATOR_VCTRL=y
+
+CONFIG_KASAN=n
+CONFIG_KASAN_INLINE=n
+CONFIG_STACKTRACE=n
+
+CONFIG_TMPFS=y
+
+CONFIG_PROVE_LOCKING=n
+CONFIG_DEBUG_LOCKDEP=n
+CONFIG_SOFTLOCKUP_DETECTOR=n
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=n
+
+CONFIG_FW_LOADER_COMPRESS=y
+
+CONFIG_USB_USBNET=y
+CONFIG_NETDEVICES=y
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_SMSC95XX=y
+
+# TK1
+CONFIG_ARM_TEGRA_DEVFREQ=y
+
+# 32-bit build failure
+CONFIG_DRM_MSM=n
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
new file mode 100644
index 000000000000..817e18ddfd4f
--- /dev/null
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -0,0 +1,199 @@
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_DEBUG_KERNEL=y
+
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM=y
+CONFIG_ZSMALLOC_STAT=y
+
+# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
+CONFIG_BLK_DEV_INITRD=n
+
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+
+CONFIG_DRM=y
+CONFIG_DRM_ROCKCHIP=y
+CONFIG_DRM_PANFROST=y
+CONFIG_DRM_LIMA=y
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_PANEL_EDP=y
+CONFIG_DRM_MSM=y
+CONFIG_DRM_ETNAVIV=y
+CONFIG_DRM_I2C_ADV7511=y
+CONFIG_PWM_CROS_EC=y
+CONFIG_BACKLIGHT_PWM=y
+
+CONFIG_ROCKCHIP_CDN_DP=n
+
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PHY_ROCKCHIP_DP=y
+CONFIG_DWMAC_ROCKCHIP=y
+CONFIG_STMMAC_ETH=y
+CONFIG_TYPEC_FUSB302=y
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+
+# MSM platform bits
+
+# For CONFIG_QCOM_LMH
+CONFIG_OF=y
+
+CONFIG_ARM_SMMU_QCOM=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_RPMHPD=y
+CONFIG_QCOM_RPMPD=y
+CONFIG_QCOM_OCMEM=y
+CONFIG_SDM_GPUCC_845=y
+CONFIG_SDM_VIDEOCC_845=y
+CONFIG_SDM_DISPCC_845=y
+CONFIG_SDM_LPASSCC_845=y
+CONFIG_SDM_CAMCC_845=y
+CONFIG_RESET_QCOM_PDC=y
+CONFIG_DRM_TI_SN65DSI86=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_PHY_QCOM_QUSB2=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_MSM_GCC_8996=y
+CONFIG_QCOM_CLK_APCC_MSM8996=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_LMH=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_QCOM_WDT=y
+CONFIG_POWER_RESET_QCOM_PON=y
+CONFIG_RTC_DRV_PM8XXX=y
+CONFIG_INTERCONNECT=y
+CONFIG_INTERCONNECT_QCOM=y
+CONFIG_INTERCONNECT_QCOM_MSM8996=y
+CONFIG_INTERCONNECT_QCOM_SDM845=y
+CONFIG_INTERCONNECT_QCOM_MSM8916=y
+CONFIG_INTERCONNECT_QCOM_MSM8996=y
+CONFIG_INTERCONNECT_QCOM_OSM_L3=y
+CONFIG_INTERCONNECT_QCOM_SC7180=y
+CONFIG_INTERCONNECT_QCOM_SM8350=y
+CONFIG_CRYPTO_DEV_QCOM_RNG=y
+CONFIG_SC_DISPCC_7180=y
+CONFIG_SC_GPUCC_7180=y
+CONFIG_SM_GPUCC_8350=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_DRM_PARADE_PS8640=y
+CONFIG_DRM_LONTIUM_LT9611UXC=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_USB_ONBOARD_HUB=y
+CONFIG_NVMEM_QCOM_QFPROM=y
+CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y
+
+
+# db410c ethernet
+CONFIG_USB_RTL8152=y
+# db820c ethernet
+CONFIG_ATL1C=y
+# Chromebooks ethernet
+CONFIG_USB_ONBOARD_HUB=y
+# 888 HDK ethernet
+CONFIG_USB_LAN78XX=y
+
+CONFIG_ARCH_ALPINE=n
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=n
+CONFIG_ARCH_BERLIN=n
+CONFIG_ARCH_BRCMSTB=n
+CONFIG_ARCH_EXYNOS=n
+CONFIG_ARCH_K3=n
+CONFIG_ARCH_LAYERSCAPE=n
+CONFIG_ARCH_LG1K=n
+CONFIG_ARCH_HISI=n
+CONFIG_ARCH_MVEBU=n
+CONFIG_ARCH_SEATTLE=n
+CONFIG_ARCH_SYNQUACER=n
+CONFIG_ARCH_RENESAS=n
+CONFIG_ARCH_R8A774A1=n
+CONFIG_ARCH_R8A774C0=n
+CONFIG_ARCH_R8A7795=n
+CONFIG_ARCH_R8A7796=n
+CONFIG_ARCH_R8A77965=n
+CONFIG_ARCH_R8A77970=n
+CONFIG_ARCH_R8A77980=n
+CONFIG_ARCH_R8A77990=n
+CONFIG_ARCH_R8A77995=n
+CONFIG_ARCH_STRATIX10=n
+CONFIG_ARCH_TEGRA=n
+CONFIG_ARCH_SPRD=n
+CONFIG_ARCH_THUNDER=n
+CONFIG_ARCH_THUNDER2=n
+CONFIG_ARCH_UNIPHIER=n
+CONFIG_ARCH_VEXPRESS=n
+CONFIG_ARCH_XGENE=n
+CONFIG_ARCH_ZX=n
+CONFIG_ARCH_ZYNQMP=n
+
+# Strip out some stuff we don't need for graphics testing, to reduce
+# the build.
+CONFIG_CAN=n
+CONFIG_WIRELESS=n
+CONFIG_RFKILL=n
+CONFIG_WLAN=n
+
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR=y
+
+CONFIG_REGULATOR_VCTRL=y
+
+CONFIG_KASAN=n
+CONFIG_KASAN_INLINE=n
+CONFIG_STACKTRACE=n
+
+CONFIG_TMPFS=y
+
+CONFIG_PROVE_LOCKING=n
+CONFIG_DEBUG_LOCKDEP=n
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+
+CONFIG_DETECT_HUNG_TASK=y
+
+CONFIG_FW_LOADER_COMPRESS=y
+CONFIG_FW_LOADER_USER_HELPER=n
+
+CONFIG_USB_USBNET=y
+CONFIG_NETDEVICES=y
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_SMSC95XX=y
+
+# For amlogic
+CONFIG_MESON_GXL_PHY=y
+CONFIG_MDIO_BUS_MUX_MESON_G12A=y
+CONFIG_DRM_MESON=y
+
+# For Mediatek
+CONFIG_DRM_MEDIATEK=y
+CONFIG_PWM_MEDIATEK=y
+CONFIG_DRM_MEDIATEK_HDMI=y
+CONFIG_GNSS=y
+CONFIG_GNSS_MTK_SERIAL=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MTK=y
+CONFIG_MTK_DEVAPC=y
+CONFIG_PWM_MTK_DISP=y
+CONFIG_MTK_CMDQ=y
+
+# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
+CONFIG_ARCH_TEGRA=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_TEGRA=m
+CONFIG_R8169=y
+CONFIG_STAGING=y
+CONFIG_DRM_TEGRA_STAGING=y
+CONFIG_TEGRA_HOST1X=y
+CONFIG_ARM_TEGRA_DEVFREQ=y
+CONFIG_TEGRA_SOCTHERM=y
+CONFIG_DRM_TEGRA_DEBUG=y
+CONFIG_PWM_TEGRA=y
diff --git a/drivers/gpu/drm/ci/build-igt.sh b/drivers/gpu/drm/ci/build-igt.sh
new file mode 100644
index 000000000000..500fa4f5c30a
--- /dev/null
+++ b/drivers/gpu/drm/ci/build-igt.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -ex
+
+git clone https://gitlab.freedesktop.org/drm/igt-gpu-tools.git --single-branch --no-checkout
+cd igt-gpu-tools
+git checkout $IGT_VERSION
+
+if [[ "$KERNEL_ARCH" = "arm" ]]; then
+ . ../.gitlab-ci/container/create-cross-file.sh armhf
+ EXTRA_MESON_ARGS="--cross-file /cross_file-armhf.txt"
+fi
+
+MESON_OPTIONS="-Doverlay=disabled \
+ -Dchamelium=disabled \
+ -Dvalgrind=disabled \
+ -Dman=enabled \
+ -Dtests=enabled \
+ -Drunner=enabled \
+ -Dlibunwind=enabled \
+ -Dprefix=/igt"
+
+mkdir -p /igt
+meson build $MESON_OPTIONS $EXTRA_MESON_ARGS
+ninja -C build -j${FDO_CI_CONCURRENT:-4} || ninja -C build -j 1
+ninja -C build install
+
+mkdir -p artifacts/
+tar -cf artifacts/igt.tar /igt
+
+# Pass needed files to the test stage
+S3_ARTIFACT_NAME="igt.tar.gz"
+gzip -c artifacts/igt.tar > ${S3_ARTIFACT_NAME}
+ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/${S3_ARTIFACT_NAME}
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
new file mode 100644
index 000000000000..7b014287a041
--- /dev/null
+++ b/drivers/gpu/drm/ci/build.sh
@@ -0,0 +1,157 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -ex
+
+# Clean up stale rebases that GitLab might not have removed when reusing a checkout dir
+rm -rf .git/rebase-apply
+
+. .gitlab-ci/container/container_pre_build.sh
+
+# libssl-dev was uninstalled because it was considered an ephemeral package
+apt-get update
+apt-get install -y libssl-dev
+
+if [[ "$KERNEL_ARCH" = "arm64" ]]; then
+ GCC_ARCH="aarch64-linux-gnu"
+ DEBIAN_ARCH="arm64"
+ DEVICE_TREES="arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
+elif [[ "$KERNEL_ARCH" = "arm" ]]; then
+ GCC_ARCH="arm-linux-gnueabihf"
+ DEBIAN_ARCH="armhf"
+ DEVICE_TREES="arch/arm/boot/dts/rockchip/rk3288-veyron-jaq.dtb"
+ DEVICE_TREES+=" arch/arm/boot/dts/allwinner/sun8i-h3-libretech-all-h3-cc.dtb"
+ DEVICE_TREES+=" arch/arm/boot/dts/nxp/imx/imx6q-cubox-i.dtb"
+ apt-get install -y libssl-dev:armhf
+else
+ GCC_ARCH="x86_64-linux-gnu"
+ DEBIAN_ARCH="x86_64"
+ DEVICE_TREES=""
+fi
+
+export ARCH=${KERNEL_ARCH}
+export CROSS_COMPILE="${GCC_ARCH}-"
+
+# The kernel doesn't like the gold linker (or the old lld in our debians).
+# Sneak in some override symlinks during kernel build until we can update
+# debian.
+mkdir -p ld-links
+for i in /usr/bin/*-ld /usr/bin/ld; do
+ i=$(basename $i)
+ ln -sf /usr/bin/$i.bfd ld-links/$i
+done
+
+NEWPATH=$(pwd)/ld-links
+export PATH=$NEWPATH:$PATH
+
+git config --global user.email "fdo@example.com"
+git config --global user.name "freedesktop.org CI"
+git config --global pull.rebase true
+
+# Try to merge fixes from target repo
+if [ "$(git ls-remote --exit-code --heads ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes)" ]; then
+ git pull ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes
+fi
+
+# Try to merge fixes from local repo if this isn't a merge request
+if [ -z "$CI_MERGE_REQUEST_PROJECT_PATH" ]; then
+ if [ "$(git ls-remote --exit-code --heads origin ${TARGET_BRANCH}-external-fixes)" ]; then
+ git pull origin ${TARGET_BRANCH}-external-fixes
+ fi
+fi
+
+for opt in $ENABLE_KCONFIGS; do
+ echo CONFIG_$opt=y >> drivers/gpu/drm/ci/${KERNEL_ARCH}.config
+done
+for opt in $DISABLE_KCONFIGS; do
+ echo CONFIG_$opt=n >> drivers/gpu/drm/ci/${KERNEL_ARCH}.config
+done
+
+if [[ -n "${MERGE_FRAGMENT}" ]]; then
+ ./scripts/kconfig/merge_config.sh ${DEFCONFIG} drivers/gpu/drm/ci/${MERGE_FRAGMENT}
+else
+ make `basename ${DEFCONFIG}`
+fi
+
+make ${KERNEL_IMAGE_NAME}
+
+mkdir -p /lava-files/
+for image in ${KERNEL_IMAGE_NAME}; do
+ cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
+done
+
+if [[ -n ${DEVICE_TREES} ]]; then
+ make dtbs
+ cp ${DEVICE_TREES} /lava-files/.
+fi
+
+make modules
+mkdir -p install/modules/
+INSTALL_MOD_PATH=install/modules/ make modules_install
+
+if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
+ make Image.lzma
+ mkimage \
+ -f auto \
+ -A arm \
+ -O linux \
+ -d arch/arm64/boot/Image.lzma \
+ -C lzma\
+ -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
+ /lava-files/cheza-kernel
+ KERNEL_IMAGE_NAME+=" cheza-kernel"
+
+ # Make a gzipped copy of the Image for db410c.
+ gzip -k /lava-files/Image
+ KERNEL_IMAGE_NAME+=" Image.gz"
+fi
+
+# Pass needed files to the test stage
+mkdir -p install
+cp -rfv .gitlab-ci/* install/.
+cp -rfv install/common install/ci-common
+cp -rfv drivers/gpu/drm/ci/* install/.
+
+. .gitlab-ci/container/container_post_build.sh
+
+if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
+ xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /lava-files/vmlinux.xz
+ FILES_TO_UPLOAD="$KERNEL_IMAGE_NAME vmlinux.xz"
+
+ if [[ -n $DEVICE_TREES ]]; then
+ FILES_TO_UPLOAD="$FILES_TO_UPLOAD $(basename -a $DEVICE_TREES)"
+ fi
+
+ for f in $FILES_TO_UPLOAD; do
+ ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/$f \
+ https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
+ done
+
+ S3_ARTIFACT_NAME="kernel-files.tar.zst"
+ tar --zstd -cf $S3_ARTIFACT_NAME install
+ ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/${S3_ARTIFACT_NAME}
+
+ echo "Download vmlinux.xz from https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/vmlinux.xz"
+fi
+
+mkdir -p artifacts/install/lib
+mv install/* artifacts/install/.
+rm -rf artifacts/install/modules
+ln -s common artifacts/install/ci-common
+
+for image in ${KERNEL_IMAGE_NAME}; do
+ cp /lava-files/$image artifacts/install/.
+done
+
+tar -C artifacts -cf artifacts/install.tar install
+rm -rf artifacts/install
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
new file mode 100644
index 000000000000..e6503f1c5927
--- /dev/null
+++ b/drivers/gpu/drm/ci/build.yml
@@ -0,0 +1,110 @@
+.build:
+ extends:
+ - .build-rules
+ stage: build
+ artifacts:
+ paths:
+ - artifacts
+ script:
+ - FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build.sh
+
+.build:arm32:
+ extends:
+ - .build
+ - .use-debian/arm64_build
+ tags:
+ - aarch64
+ variables:
+ DEFCONFIG: "arch/arm/configs/multi_v7_defconfig"
+ KERNEL_IMAGE_NAME: "zImage"
+ KERNEL_ARCH: "arm"
+
+.build:arm64:
+ extends:
+ - .build
+ - .use-debian/arm64_build
+ tags:
+ - aarch64
+ variables:
+ DEFCONFIG: "arch/arm64/configs/defconfig"
+ KERNEL_IMAGE_NAME: "Image"
+ KERNEL_ARCH: "arm64"
+
+.build:x86_64:
+ extends:
+ - .build
+ - .use-debian/x86_64_build
+ variables:
+ DEFCONFIG: "arch/x86/configs/x86_64_defconfig"
+ KERNEL_IMAGE_NAME: "bzImage"
+ KERNEL_ARCH: "x86_64"
+
+
+# Build IGT for testing on devices
+
+igt:arm32:
+ extends: .build:arm32
+ script:
+ - FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
+
+igt:arm64:
+ extends: .build:arm64
+ script:
+ - FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
+
+igt:x86_64:
+ extends: .build:x86_64
+ script:
+ - FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
+
+# Build kernels for testing on devices
+
+testing:arm32:
+ extends: .build:arm32
+ variables:
+    # Would be good to have DEBUG_KMEMLEAK, but it doesn't work well with either
+    # PROVE_LOCKING or KASAN as of 5.17.
+ #
+ # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
+ # becoming too big for their bootloaders.
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ UPLOAD_TO_MINIO: 1
+ MERGE_FRAGMENT: arm.config
+
+testing:arm64:
+ extends: .build:arm64
+ variables:
+    # Would be good to have DEBUG_KMEMLEAK, but it doesn't work well with either
+    # PROVE_LOCKING or KASAN as of 5.17.
+ #
+ # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
+ # becoming too big for their bootloaders.
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ UPLOAD_TO_MINIO: 1
+ MERGE_FRAGMENT: arm64.config
+
+testing:x86_64:
+ extends: .build:x86_64
+ variables:
+    # Would be good to have DEBUG_KMEMLEAK, but it doesn't work well with either
+    # PROVE_LOCKING or KASAN as of 5.17.
+ #
+ # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
+ # becoming too big for their bootloaders.
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ UPLOAD_TO_MINIO: 1
+ MERGE_FRAGMENT: x86_64.config
+
+
+# Jobs for build-testing different configurations
+
+build:arm32:
+ extends: .build:arm32
+
+build-nodebugfs:arm64:
+ extends: .build:arm64
+ variables:
+ DISABLE_KCONFIGS: "DEBUG_FS"
+
+build:x86_64:
+ extends: .build:x86_64
diff --git a/drivers/gpu/drm/ci/check-patch.py b/drivers/gpu/drm/ci/check-patch.py
new file mode 100755
index 000000000000..a5f399a20e25
--- /dev/null
+++ b/drivers/gpu/drm/ci/check-patch.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# check-patch.py: run checkpatch.pl across all commits in a branch
+#
+# Based on qemu/.gitlab-ci.d/check-patch.py
+#
+# Copyright (C) 2020 Red Hat, Inc.
+# Copyright (C) 2022 Collabora Ltd.
+
+import os
+import os.path
+import sys
+import subprocess
+
+repourl = "https://gitlab.freedesktop.org/%s.git" % os.environ["CI_MERGE_REQUEST_PROJECT_PATH"]
+
+# GitLab CI environment does not give us any direct info about the
+# base for the user's branch. We thus need to figure out a common
+# ancestor between the user's branch and current git master.
+os.environ["GIT_DEPTH"] = "1000"
+subprocess.call(["git", "remote", "remove", "check-patch"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+subprocess.check_call(["git", "remote", "add", "check-patch", repourl])
+subprocess.check_call(["git", "fetch", "check-patch", os.environ["CI_MERGE_REQUEST_TARGET_BRANCH_NAME"]],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL)
+
+ancestor = subprocess.check_output(["git", "merge-base",
+ "check-patch/%s" % os.environ["CI_MERGE_REQUEST_TARGET_BRANCH_NAME"], "HEAD"],
+ universal_newlines=True)
+
+ancestor = ancestor.strip()
+
+log = subprocess.check_output(["git", "log", "--format=%H %s",
+ ancestor + "..."],
+ universal_newlines=True)
+
+subprocess.check_call(["git", "remote", "rm", "check-patch"])
+
+if log == "":
+ print("\nNo commits since %s, skipping checks\n" % ancestor)
+ sys.exit(0)
+
+errors = False
+
+print("\nChecking all commits since %s...\n" % ancestor, flush=True)
+
+ret = subprocess.run(["scripts/checkpatch.pl",
+ "--terse",
+ "--types", os.environ["CHECKPATCH_TYPES"],
+ "--git", ancestor + "..."])
+
+if ret.returncode != 0:
+ print(" ❌ FAIL one or more commits failed scripts/checkpatch.pl")
+ sys.exit(1)
+
+sys.exit(0)
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
new file mode 100644
index 000000000000..9764e7921a4f
--- /dev/null
+++ b/drivers/gpu/drm/ci/container.yml
@@ -0,0 +1,65 @@
+.container:
+ variables:
+ CI_REPOSITORY_URL: ${DRM_CI_PROJECT_URL}.git # So ci-templates clones drm-ci instead of the repo to test
+ CI_COMMIT_SHA: ${DRM_CI_COMMIT_SHA}
+
+debian/x86_64_build-base:
+ variables:
+ EXTRA_LOCAL_PACKAGES: "libcairo-dev libdw-dev libjson-c-dev libkmod2 libkmod-dev libpciaccess-dev libproc2-dev libudev-dev libunwind-dev python3-docutils bc python3-ply libssl-dev bc"
+
+debian/x86_64_test-gl:
+ variables:
+ EXTRA_LOCAL_PACKAGES: "jq libasound2 libcairo2 libdw1 libglib2.0-0 libjson-c5 libkmod-dev libkmod2 libgles2 libproc2-dev"
+
+debian/arm64_build:
+ variables:
+ EXTRA_LOCAL_PACKAGES: "libcairo-dev libdw-dev libjson-c-dev libproc2-dev libkmod2 libkmod-dev libpciaccess-dev libudev-dev libunwind-dev python3-docutils libssl-dev crossbuild-essential-armhf libkmod-dev:armhf libproc2-dev:armhf libunwind-dev:armhf libdw-dev:armhf libpixman-1-dev:armhf libcairo-dev:armhf libudev-dev:armhf libjson-c-dev:armhf"
+
+.kernel+rootfs:
+ variables:
+ EXTRA_LOCAL_PACKAGES: "jq libasound2 libcairo2 libdw1 libglib2.0-0 libjson-c5"
+
+# Disable container jobs that we won't use
+alpine/x86_64_build:
+ rules:
+ - when: never
+
+debian/x86_64_test-vk:
+ rules:
+ - when: never
+
+fedora/x86_64_build:
+ rules:
+ - when: never
+
+debian/android_build:
+ rules:
+ - when: never
+
+debian/x86_64_test-android:
+ rules:
+ - when: never
+
+windows_build_vs2019:
+ rules:
+ - when: never
+
+windows_test_vs2019:
+ rules:
+ - when: never
+
+.debian/x86_64_build-mingw:
+ rules:
+ - when: never
+
+rustfmt:
+ rules:
+ - when: never
+
+windows_vs2019:
+ rules:
+ - when: never
+
+clang-format:
+ rules:
+ - when: never \ No newline at end of file
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
new file mode 100644
index 000000000000..2c4df53f5dfe
--- /dev/null
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -0,0 +1,251 @@
+variables:
+ DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 0dc961645c4f0241f8512cb0ec3ad59635842072
+
+ UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
+ TARGET_BRANCH: drm-next
+
+ IGT_VERSION: 471bfababd070e1dac0ebb87470ac4f2ae85e663
+
+ DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/anholt/deqp-runner.git
+ DEQP_RUNNER_GIT_TAG: v0.15.0
+
+ FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs
+ MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
+ DRM_CI_PROJECT_URL: https://gitlab.freedesktop.org/${DRM_CI_PROJECT_PATH}
+ CI_PRE_CLONE_SCRIPT: |-
+ set -o xtrace
+ curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s ${DRM_CI_PROJECT_URL}/-/raw/${DRM_CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh -o download-git-cache.sh
+ bash download-git-cache.sh
+ rm download-git-cache.sh
+ set +o xtrace
+ S3_HOST: s3.freedesktop.org
+ # per-pipeline artifact storage on MinIO
+ PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
+ # per-job artifact storage on MinIO
+ JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
+
+ LAVA_JOB_PRIORITY: 30
+
+default:
+ before_script:
+ - export SCRIPTS_DIR=$(mktemp -d)
+ - curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${DRM_CI_PROJECT_URL}/-/raw/${DRM_CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh"
+ - source ${SCRIPTS_DIR}/setup-test-env.sh
+ - echo -e "\e[0Ksection_start:$(date +%s):unset_env_vars_section[collapsed=true]\r\e[0KUnsetting vulnerable environment variables"
+ - export CI_JOB_JWT_FILE="${CI_JOB_JWT_FILE:-$(mktemp)}"
+ - echo -n "${CI_JOB_JWT}" > "${CI_JOB_JWT_FILE}"
+ - unset CI_JOB_JWT
+ - echo -e "\e[0Ksection_end:$(date +%s):unset_env_vars_section\r\e[0K"
+
+ - echo -e "\e[0Ksection_start:$(date +%s):drm_ci_download_section[collapsed=true]\r\e[0KDownloading mesa from $DRM_CI_PROJECT_URL/-/archive/$DRM_CI_COMMIT_SHA/mesa-$DRM_CI_COMMIT_SHA.tar.gz"
+ - cd $CI_PROJECT_DIR
+ - curl --output - $DRM_CI_PROJECT_URL/-/archive/$DRM_CI_COMMIT_SHA/mesa-$DRM_CI_COMMIT_SHA.tar.gz | tar -xz
+ - mv mesa-$DRM_CI_COMMIT_SHA/.gitlab-ci* .
+ - rm -rf mesa-$DRM_CI_COMMIT_SHA/
+ - echo -e "\e[0Ksection_end:$(date +%s):drm_ci_download_section\r\e[0K"
+
+ after_script:
+ - >
+ set +x
+
+ test -e "${CI_JOB_JWT_FILE}" &&
+ export CI_JOB_JWT="$(<${CI_JOB_JWT_FILE})" &&
+ rm "${CI_JOB_JWT_FILE}"
+
+ # Retry when job fails.
+ retry:
+ max: 1
+ # Ignore runner_unsupported, stale_schedule, archived_failure, or
+ # unmet_prerequisites
+ when:
+ - api_failure
+ - runner_system_failure
+ - script_failure
+ - job_execution_timeout
+ - scheduler_failure
+ - data_integrity_failure
+ - unknown_failure
+
+include:
+ - project: 'freedesktop/ci-templates'
+ ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
+ file:
+ - '/templates/ci-fairy.yml'
+ - project: 'freedesktop/ci-templates'
+ ref: *ci-templates-commit
+ file:
+ - '/templates/alpine.yml'
+ - '/templates/debian.yml'
+ - '/templates/fedora.yml'
+ - project: *drm-ci-project-path
+ ref: *drm-ci-commit-sha
+ file:
+ - '/.gitlab-ci/farm-rules.yml'
+ - '/.gitlab-ci/test-source-dep.yml'
+ - '/.gitlab-ci/container/gitlab-ci.yml'
+ - '/.gitlab-ci/test/gitlab-ci.yml'
+ - '/.gitlab-ci/lava/lava-gitlab-ci.yml'
+ - drivers/gpu/drm/ci/image-tags.yml
+ - drivers/gpu/drm/ci/container.yml
+ - drivers/gpu/drm/ci/static-checks.yml
+ - drivers/gpu/drm/ci/build.yml
+ - drivers/gpu/drm/ci/test.yml
+ - 'https://gitlab.freedesktop.org/gfx-ci/lab-status/-/raw/main/lab-status.yml'
+
+
+stages:
+ - sanity
+ - container
+ - git-archive
+ - build
+ - amdgpu
+ - i915
+ - mediatek
+ - meson
+ - msm
+ - rockchip
+ - virtio-gpu
+ - lint
+
+# YAML anchors for rule conditions
+# --------------------------------
+.rules-anchors:
+ rules:
+ # Pipeline for forked project branch
+ - if: &is-forked-branch '$CI_COMMIT_BRANCH && $CI_PROJECT_NAMESPACE != "mesa"'
+ when: manual
+ # Forked project branch / pre-merge pipeline not for Marge bot
+ - if: &is-forked-branch-or-pre-merge-not-for-marge '$CI_PROJECT_NAMESPACE != "mesa" || ($GITLAB_USER_LOGIN != "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event")'
+ when: manual
+ # Pipeline runs for the main branch of the upstream Mesa project
+ - if: &is-mesa-main '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_COMMIT_BRANCH'
+ when: always
+ # Post-merge pipeline
+ - if: &is-post-merge '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH'
+ when: on_success
+ # Post-merge pipeline, not for Marge Bot
+ - if: &is-post-merge-not-for-marge '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH'
+ when: on_success
+ # Pre-merge pipeline
+ - if: &is-pre-merge '$CI_PIPELINE_SOURCE == "merge_request_event"'
+ when: on_success
+ # Pre-merge pipeline for Marge Bot
+ - if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
+ when: on_success
+
+# Rule to filter for only scheduled pipelines.
+.scheduled_pipeline-rules:
+ rules:
+ - if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
+ when: on_success
+
+# Generic rule to not run the job during scheduled pipelines. Jobs that aren't
+# something like a nightly run should include this rule.
+.no_scheduled_pipelines-rules:
+ rules:
+ - if: *is-scheduled-pipeline
+ when: never
+
+# When to automatically run the CI for build jobs
+.build-rules:
+ rules:
+ - !reference [.no_scheduled_pipelines-rules, rules]
+ # Run automatically once all dependency jobs have passed
+ - when: on_success
+
+
+.ci-deqp-artifacts:
+ artifacts:
+ name: "mesa_${CI_JOB_NAME}"
+ when: always
+ untracked: false
+ paths:
+ # Watch out! Artifacts are relative to the build dir.
+ # https://gitlab.com/gitlab-org/gitlab-ce/commit/8788fb925706cad594adf6917a6c5f6587dd1521
+ - artifacts
+ - _build/meson-logs/*.txt
+ - _build/meson-logs/strace
+
+
+.container-rules:
+ rules:
+ - !reference [.no_scheduled_pipelines-rules, rules]
+ # Run pipeline by default in the main project if any CI pipeline
+ # configuration files were changed, to ensure docker images are up to date
+ - if: *is-post-merge
+ changes:
+ - drivers/gpu/drm/ci/**/*
+ when: on_success
+ # Run pipeline by default if it was triggered by Marge Bot, is for a
+ # merge request, and any files affecting the pipeline were changed
+ - if: *is-pre-merge-for-marge
+ when: on_success
+ # Run pipeline by default in the main project if it was not triggered by
+ # Marge Bot, and any files affecting the pipeline were changed
+ - if: *is-post-merge-not-for-marge
+ when: on_success
+ # Allow triggering jobs manually in other cases
+ - when: manual
+
+
+
+# Git archive
+
+make git archive:
+ extends:
+ - .fdo.ci-fairy
+ stage: git-archive
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ # ensure we are running on packet
+ tags:
+ - packet.net
+ script:
+ # Remove drm-ci files we just added
+ - rm -rf .gitlab-ci.*
+
+ # Compactify the .git directory
+ - git gc --aggressive
+ # compress the current folder
+ - tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
+
+ # login with the JWT token file
+ - ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
+
+
+# Sanity checks of MR settings and commit logs
+sanity:
+ extends:
+ - .fdo.ci-fairy
+ stage: sanity
+ rules:
+ - if: *is-pre-merge
+ when: on_success
+ # Other cases default to never
+ variables:
+ GIT_STRATEGY: none
+ script:
+ # ci-fairy check-commits --junit-xml=check-commits.xml
+ - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+ artifacts:
+ when: on_failure
+ reports:
+ junit: check-*.xml
+
+# Rules for tests that should not block merging, but should be available to
+# optionally run with the "play" button in the UI in pre-merge non-marge
+# pipelines. This should appear in "extends:" after any includes of
+# test-source-dep.yml rules, so that these rules replace those.
+.test-manual-mr:
+ rules:
+ - !reference [.no_scheduled_pipelines-rules, rules]
+ - if: *is-forked-branch-or-pre-merge-not-for-marge
+ when: manual
+ variables:
+ JOB_TIMEOUT: 80
+
+
+# Jobs that need to pass before spending hardware resources on further testing
+.required-for-hardware-jobs:
+ needs: [] \ No newline at end of file
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
new file mode 100755
index 000000000000..2bb759165063
--- /dev/null
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -0,0 +1,77 @@
+#!/bin/sh
+# SPDX-License-Identifier: MIT
+
+set -ex
+
+export IGT_FORCE_DRIVER=${DRIVER_NAME}
+export PATH=$PATH:/igt/bin/
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/igt/lib/aarch64-linux-gnu/:/igt/lib/x86_64-linux-gnu:/igt/lib:/igt/lib64
+
+# Uncomment the below to debug problems with driver probing
+: '
+ls -l /dev/dri/
+cat /sys/kernel/debug/devices_deferred
+cat /sys/kernel/debug/device_component/*
+'
+
+# Dump drm state to confirm that kernel was able to find a connected display:
+# TODO this path might not exist for all drivers.. maybe run modetest instead?
+set +e
+cat /sys/kernel/debug/dri/*/state
+set -e
+
+# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
+if [ "$IGT_FORCE_DRIVER" = "amdgpu" ]; then
+ mv /install/modules/lib/modules/* /lib/modules/.
+ modprobe amdgpu
+fi
+
+if [ -e "/install/xfails/$DRIVER_NAME-$GPU_VERSION-skips.txt" ]; then
+ IGT_SKIPS="--skips /install/xfails/$DRIVER_NAME-$GPU_VERSION-skips.txt"
+fi
+
+if [ -e "/install/xfails/$DRIVER_NAME-$GPU_VERSION-flakes.txt" ]; then
+ IGT_FLAKES="--flakes /install/xfails/$DRIVER_NAME-$GPU_VERSION-flakes.txt"
+fi
+
+if [ -e "/install/xfails/$DRIVER_NAME-$GPU_VERSION-fails.txt" ]; then
+ IGT_FAILS="--baseline /install/xfails/$DRIVER_NAME-$GPU_VERSION-fails.txt"
+fi
+
+if [ "`uname -m`" = "aarch64" ]; then
+ ARCH="arm64"
+elif [ "`uname -m`" = "armv7l" ]; then
+ ARCH="arm"
+else
+ ARCH="x86_64"
+fi
+
+curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s ${FDO_HTTP_CACHE_URI:-}$PIPELINE_ARTIFACTS_BASE/$ARCH/igt.tar.gz | tar --zstd -v -x -C /
+
+set +e
+igt-runner \
+ run \
+ --igt-folder /igt/libexec/igt-gpu-tools \
+ --caselist /install/testlist.txt \
+ --output /results \
+ $IGT_SKIPS \
+ $IGT_FLAKES \
+ $IGT_FAILS \
+ --fraction-start $CI_NODE_INDEX \
+ --fraction $CI_NODE_TOTAL \
+ --jobs 1
+ret=$?
+set -e
+
+deqp-runner junit \
+ --testsuite IGT \
+ --results /results/failures.csv \
+ --output /results/junit.xml \
+ --limit 50 \
+ --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
+
+# Store the results also in the simpler format used by the runner in ChromeOS CI
+#sed -r 's/(dmesg-warn|pass)/success/g' /results/results.txt > /results/results_simple.txt
+
+cd $oldpath
+exit $ret
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
new file mode 100644
index 000000000000..f051b6c547c5
--- /dev/null
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -0,0 +1,15 @@
+variables:
+ CONTAINER_TAG: "2023-08-10-mesa-uprev"
+ DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
+ DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
+
+ DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
+ DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
+
+ KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
+
+ DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
+ DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
+ DEBIAN_X86_64_TEST_GL_TAG: "${CONTAINER_TAG}"
+
+ ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}" \ No newline at end of file
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
new file mode 100755
index 000000000000..0c4456b21b0f
--- /dev/null
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -e
+set -x
+
+# Try to use the kernel and rootfs built in mainline first, so we're more
+# likely to hit cache
+if curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then
+ BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}"
+else
+ BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}"
+fi
+
+rm -rf results
+mkdir -p results/job-rootfs-overlay/
+
+cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
+cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
+cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
+cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
+
+# Prepare env vars for upload.
+section_start variables "Variables passed through:"
+KERNEL_IMAGE_BASE_URL="https://${BASE_SYSTEM_HOST_PATH}" \
+ artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
+section_end variables
+
+tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
+ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
+
+touch results/lava.log
+tail -f results/lava.log &
+
+PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
+ submit \
+ --dump-yaml \
+ --pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
+ --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
+ --kernel-url-prefix "https://${PIPELINE_ARTIFACTS_BASE}/${ARCH}" \
+ --build-url "${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${ARCH}/kernel-files.tar.zst" \
+ --job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
+ --job-timeout-min ${JOB_TIMEOUT:-80} \
+ --first-stage-init artifacts/ci-common/init-stage1.sh \
+ --ci-project-dir "${CI_PROJECT_DIR}" \
+ --device-type "${DEVICE_TYPE}" \
+ --dtb-filename "${DTB}" \
+ --jwt-file "${CI_JOB_JWT_FILE}" \
+ --kernel-image-name "${KERNEL_IMAGE_NAME}" \
+ --kernel-image-type "${KERNEL_IMAGE_TYPE}" \
+ --boot-method "${BOOT_METHOD}" \
+ --visibility-group "${VISIBILITY_GROUP}" \
+ --lava-tags "${LAVA_TAGS}" \
+ --mesa-job-name "$CI_JOB_NAME" \
+ --structured-log-file "results/lava_job_detail.json" \
+ --ssh-client-image "${LAVA_SSH_CLIENT_IMAGE}" \
+ >> results/lava.log
diff --git a/drivers/gpu/drm/ci/static-checks.yml b/drivers/gpu/drm/ci/static-checks.yml
new file mode 100644
index 000000000000..13ffa827b7fa
--- /dev/null
+++ b/drivers/gpu/drm/ci/static-checks.yml
@@ -0,0 +1,12 @@
+check-patch:
+ extends:
+ - .build
+ - .use-debian/x86_64_build
+ script:
+ - drivers/gpu/drm/ci/check-patch.py
+ variables:
+ CHECKPATCH_TYPES: "BAD_SIGN_OFF,BAD_STABLE_ADDRESS_STYLE,COMMIT_COMMENT_SYMBOL,COMMIT_MESSAGE,EMAIL_SUBJECT,FROM_SIGN_OFF_MISMATCH,MISSING_SIGN_OFF,NO_AUTHOR_SIGN_OFF,DIFF_IN_COMMIT_MSG,GERRIT_CHANGE_ID,GIT_COMMIT_ID,UNKNOWN_COMMIT_ID,CODE_INDENT,BIT_MACRO,DOS_LINE_ENDINGS"
+ rules:
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+ when: on_success
+ # Other cases default to never
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
new file mode 100644
index 000000000000..6473cddaa7a9
--- /dev/null
+++ b/drivers/gpu/drm/ci/test.yml
@@ -0,0 +1,335 @@
+.test-rules:
+ rules:
+ - if: '$FD_FARM == "offline" && $RUNNER_TAG =~ /^google-freedreno-/'
+ when: never
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
+ - !reference [.no_scheduled_pipelines-rules, rules]
+ - when: on_success
+
+.lava-test:
+ extends:
+ - .test-rules
+ script:
+ # Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
+ - rm -rf install
+ - tar -xf artifacts/install.tar
+ - mv install/* artifacts/.
+ # Override it with our lava-submit.sh script
+ - ./artifacts/lava-submit.sh
+
+.lava-igt:arm32:
+ extends:
+ - .lava-test:arm32
+ variables:
+ HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
+ ARCH: "armhf"
+ dependencies:
+ - testing:arm32
+ needs:
+ - alpine/x86_64_lava_ssh_client
+ - kernel+rootfs_arm32
+ - debian/x86_64_build
+ - testing:arm32
+ - igt:arm32
+
+.lava-igt:arm64:
+ extends:
+ - .lava-test:arm64
+ variables:
+ HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
+ ARCH: "arm64"
+ dependencies:
+ - testing:arm64
+ needs:
+ - alpine/x86_64_lava_ssh_client
+ - kernel+rootfs_arm64
+ - debian/x86_64_build
+ - testing:arm64
+ - igt:arm64
+
+.lava-igt:x86_64:
+ extends:
+ - .lava-test:x86_64
+ variables:
+ HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
+ ARCH: "x86_64"
+ dependencies:
+ - testing:x86_64
+ needs:
+ - alpine/x86_64_lava_ssh_client
+ - kernel+rootfs_x86_64
+ - debian/x86_64_build
+ - testing:x86_64
+ - igt:x86_64
+
+.baremetal-igt-arm64:
+ extends:
+ - .baremetal-test-arm64
+ - .use-debian/arm64_test
+ - .test-rules
+ variables:
+ FDO_CI_CONCURRENT: 10
+ HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
+ S3_ARTIFACT_NAME: "arm64/kernel-files"
+ BM_KERNEL: https://${PIPELINE_ARTIFACTS_BASE}/arm64/Image.gz
+ BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
+ needs:
+ - debian/arm64_test
+ - job: testing:arm64
+ artifacts: false
+ - igt:arm64
+ tags:
+ - $RUNNER_TAG
+
+msm:sc7180:
+ extends:
+ - .lava-igt:arm64
+ stage: msm
+ parallel: 2
+ variables:
+ DRIVER_NAME: msm
+ DEVICE_TYPE: sc7180-trogdor-lazor-limozeen
+ DTB: sc7180-trogdor-lazor-limozeen-nots-r5
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: ""
+ GPU_VERSION: sc7180
+ RUNNER_TAG: mesa-ci-x86-64-lava-sc7180-trogdor-lazor-limozeen
+
+msm:apq8016:
+ extends:
+ - .baremetal-igt-arm64
+ stage: msm
+ variables:
+ DRIVER_NAME: msm
+ BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc.dtb
+ GPU_VERSION: apq8016
+ BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
+ RUNNER_TAG: google-freedreno-db410c
+ script:
+ - ./install/bare-metal/fastboot.sh
+ rules:
+  # TODO: current issue: it is not finding the NFS root. Fix and remove this rule.
+ - when: never
+
+msm:apq8096:
+ extends:
+ - .baremetal-igt-arm64
+ stage: msm
+ variables:
+ DRIVER_NAME: msm
+ BM_KERNEL_EXTRA_ARGS: maxcpus=2
+ BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8096-db820c.dtb
+ GPU_VERSION: apq8096
+ RUNNER_TAG: google-freedreno-db820c
+ script:
+ - ./install/bare-metal/fastboot.sh
+
+msm:sdm845:
+ extends:
+ - .baremetal-igt-arm64
+ stage: msm
+ parallel: 6
+ variables:
+ DRIVER_NAME: msm
+ BM_KERNEL: https://${PIPELINE_ARTIFACTS_BASE}/arm64/cheza-kernel
+ GPU_VERSION: sdm845
+ RUNNER_TAG: google-freedreno-cheza
+ script:
+ - ./install/bare-metal/cros-servo.sh
+
+rockchip:rk3288:
+ extends:
+ - .lava-igt:arm32
+ stage: rockchip
+ variables:
+ DRIVER_NAME: rockchip
+ DEVICE_TYPE: rk3288-veyron-jaq
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: "zimage"
+ GPU_VERSION: rk3288
+ RUNNER_TAG: mesa-ci-x86-64-lava-rk3288-veyron-jaq
+
+rockchip:rk3399:
+ extends:
+ - .lava-igt:arm64
+ stage: rockchip
+ parallel: 3
+ variables:
+ DRIVER_NAME: rockchip
+ DEVICE_TYPE: rk3399-gru-kevin
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: ""
+ GPU_VERSION: rk3399
+ RUNNER_TAG: mesa-ci-x86-64-lava-rk3399-gru-kevin
+
+.i915:
+ extends:
+ - .lava-igt:x86_64
+ stage: i915
+ variables:
+ DRIVER_NAME: i915
+ DTB: ""
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: ""
+
+i915:apl:
+ extends:
+ - .i915
+ parallel: 12
+ variables:
+ DEVICE_TYPE: asus-C523NA-A20057-coral
+ GPU_VERSION: apl
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-C523NA-A20057-coral
+
+i915:glk:
+ extends:
+ - .i915
+ parallel: 5
+ variables:
+ DEVICE_TYPE: hp-x360-12b-ca0010nr-n4020-octopus
+ GPU_VERSION: glk
+ RUNNER_TAG: mesa-ci-x86-64-lava-hp-x360-12b-ca0010nr-n4020-octopus
+
+i915:amly:
+ extends:
+ - .i915
+ parallel: 8
+ variables:
+ DEVICE_TYPE: asus-C433TA-AJ0005-rammus
+ GPU_VERSION: amly
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-C433TA-AJ0005-rammus
+
+i915:kbl:
+ extends:
+ - .i915
+ parallel: 5
+ variables:
+ DEVICE_TYPE: hp-x360-14-G1-sona
+ GPU_VERSION: kbl
+ RUNNER_TAG: mesa-ci-x86-64-lava-hp-x360-14-G1-sona
+
+i915:whl:
+ extends:
+ - .i915
+ parallel: 8
+ variables:
+ DEVICE_TYPE: dell-latitude-5400-8665U-sarien
+ GPU_VERSION: whl
+ RUNNER_TAG: mesa-ci-x86-64-lava-dell-latitude-5400-8665U-sarien
+
+i915:cml:
+ extends:
+ - .i915
+ parallel: 6
+ variables:
+ DEVICE_TYPE: asus-C436FA-Flip-hatch
+ GPU_VERSION: cml
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-C436FA-flip-hatch
+
+i915:tgl:
+ extends:
+ - .i915
+ parallel: 6
+ variables:
+ DEVICE_TYPE: asus-cx9400-volteer
+ GPU_VERSION: tgl
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-cx9400-volteer
+
+.amdgpu:
+ extends:
+ - .lava-igt:x86_64
+ stage: amdgpu
+ variables:
+ DRIVER_NAME: amdgpu
+ DTB: ""
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: ""
+
+amdgpu:stoney:
+ extends:
+ - .amdgpu
+ variables:
+ DEVICE_TYPE: hp-11A-G6-EE-grunt
+ GPU_VERSION: stoney
+ RUNNER_TAG: mesa-ci-x86-64-lava-hp-11A-G6-EE-grunt
+
+.mediatek:
+ extends:
+ - .lava-igt:arm64
+ stage: mediatek
+ variables:
+ DRIVER_NAME: mediatek
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: ""
+
+mediatek:mt8173:
+ extends:
+ - .mediatek
+ variables:
+ DEVICE_TYPE: mt8173-elm-hana
+ GPU_VERSION: mt8173
+ RUNNER_TAG: mesa-ci-x86-64-lava-mt8173-elm-hana
+ rules:
+ # TODO: current issue: device is hanging. Fix and remove this rule.
+ - when: never
+
+mediatek:mt8183:
+ extends:
+ - .mediatek
+ variables:
+ DEVICE_TYPE: mt8183-kukui-jacuzzi-juniper-sku16
+ GPU_VERSION: mt8183
+ RUNNER_TAG: mesa-ci-x86-64-lava-mt8183-kukui-jacuzzi-juniper-sku16
+
+# drm-mtk doesn't even probe yet in mainline for mt8192
+.mediatek:mt8192:
+ extends:
+ - .mediatek
+ variables:
+ DEVICE_TYPE: mt8192-asurada-spherion-r0
+ GPU_VERSION: mt8192
+ RUNNER_TAG: mesa-ci-x86-64-lava-mt8192-asurada-spherion-r0
+
+.meson:
+ extends:
+ - .lava-igt:arm64
+ stage: meson
+ variables:
+ DRIVER_NAME: meson
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: u-boot
+ KERNEL_IMAGE_TYPE: "image"
+
+meson:g12b:
+ extends:
+ - .meson
+ variables:
+ DEVICE_TYPE: meson-g12b-a311d-khadas-vim3
+ GPU_VERSION: g12b
+ RUNNER_TAG: mesa-ci-x86-64-lava-meson-g12b-a311d-khadas-vim3
+
+virtio_gpu:none:
+ stage: virtio-gpu
+ variables:
+ CROSVM_GALLIUM_DRIVER: llvmpipe
+ DRIVER_NAME: virtio_gpu
+ GPU_VERSION: none
+ extends:
+ - .test-gl
+ tags:
+ - kvm
+ script:
+ - ln -sf $CI_PROJECT_DIR/install /install
+ - mv install/bzImage /lava-files/bzImage
+ - install/crosvm-runner.sh install/igt_runner.sh
+ needs:
+ - debian/x86_64_test-gl
+ - testing:x86_64
+ - igt:x86_64
+ rules:
+ # TODO: current issue: malloc(): corrupted top size. Fix and remove this rule.
+ - when: never \ No newline at end of file
diff --git a/drivers/gpu/drm/ci/testlist.txt b/drivers/gpu/drm/ci/testlist.txt
new file mode 100644
index 000000000000..f82cd90372f4
--- /dev/null
+++ b/drivers/gpu/drm/ci/testlist.txt
@@ -0,0 +1,2912 @@
+core_auth@getclient-simple
+core_auth@getclient-master-drop
+core_auth@basic-auth
+core_auth@many-magics
+core_getclient
+core_getstats
+core_getversion
+core_setmaster_vs_auth
+drm_read@invalid-buffer
+drm_read@fault-buffer
+drm_read@empty-block
+drm_read@empty-nonblock
+drm_read@short-buffer-block
+drm_read@short-buffer-nonblock
+drm_read@short-buffer-wakeup
+gem_eio@throttle
+gem_eio@create
+gem_eio@create-ext
+gem_eio@context-create
+gem_eio@execbuf
+gem_eio@banned
+gem_eio@suspend
+gem_eio@hibernate
+gem_eio@in-flight-external
+gem_eio@in-flight-suspend
+gem_eio@reset-stress
+gem_eio@unwedge-stress
+gem_eio@wait-immediate
+gem_eio@wait-wedge-immediate
+gem_eio@in-flight-immediate
+gem_eio@in-flight-contexts-immediate
+gem_eio@in-flight-internal-immediate
+gem_eio@wait-1us
+gem_eio@wait-wedge-1us
+gem_eio@in-flight-1us
+gem_eio@in-flight-contexts-1us
+gem_eio@in-flight-internal-1us
+gem_eio@wait-10ms
+gem_eio@wait-wedge-10ms
+gem_eio@in-flight-10ms
+gem_eio@in-flight-contexts-10ms
+gem_eio@in-flight-internal-10ms
+gem_eio@kms
+kms_3d
+kms_addfb_basic@unused-handle
+kms_addfb_basic@unused-pitches
+kms_addfb_basic@unused-offsets
+kms_addfb_basic@unused-modifier
+kms_addfb_basic@clobberred-modifier
+kms_addfb_basic@invalid-smem-bo-on-discrete
+kms_addfb_basic@legacy-format
+kms_addfb_basic@no-handle
+kms_addfb_basic@basic
+kms_addfb_basic@bad-pitch-0
+kms_addfb_basic@bad-pitch-32
+kms_addfb_basic@bad-pitch-63
+kms_addfb_basic@bad-pitch-128
+kms_addfb_basic@bad-pitch-256
+kms_addfb_basic@bad-pitch-1024
+kms_addfb_basic@bad-pitch-999
+kms_addfb_basic@bad-pitch-65536
+kms_addfb_basic@invalid-get-prop-any
+kms_addfb_basic@invalid-get-prop
+kms_addfb_basic@invalid-set-prop-any
+kms_addfb_basic@invalid-set-prop
+kms_addfb_basic@master-rmfb
+kms_addfb_basic@addfb25-modifier-no-flag
+kms_addfb_basic@addfb25-bad-modifier
+kms_addfb_basic@addfb25-x-tiled-mismatch-legacy
+kms_addfb_basic@addfb25-x-tiled-legacy
+kms_addfb_basic@addfb25-framebuffer-vs-set-tiling
+kms_addfb_basic@basic-x-tiled-legacy
+kms_addfb_basic@framebuffer-vs-set-tiling
+kms_addfb_basic@tile-pitch-mismatch
+kms_addfb_basic@basic-y-tiled-legacy
+kms_addfb_basic@size-max
+kms_addfb_basic@too-wide
+kms_addfb_basic@too-high
+kms_addfb_basic@bo-too-small
+kms_addfb_basic@small-bo
+kms_addfb_basic@bo-too-small-due-to-tiling
+kms_addfb_basic@addfb25-y-tiled-legacy
+kms_addfb_basic@addfb25-yf-tiled-legacy
+kms_addfb_basic@addfb25-y-tiled-small-legacy
+kms_addfb_basic@addfb25-4-tiled
+kms_async_flips@async-flip-with-page-flip-events
+kms_async_flips@alternate-sync-async-flip
+kms_async_flips@test-time-stamp
+kms_async_flips@test-cursor
+kms_async_flips@invalid-async-flip
+kms_async_flips@crc
+kms_atomic@plane-overlay-legacy
+kms_atomic@plane-primary-legacy
+kms_atomic@plane-primary-overlay-mutable-zpos
+kms_atomic@plane-immutable-zpos
+kms_atomic@test-only
+kms_atomic@plane-cursor-legacy
+kms_atomic@plane-invalid-params
+kms_atomic@plane-invalid-params-fence
+kms_atomic@crtc-invalid-params
+kms_atomic@crtc-invalid-params-fence
+kms_atomic@atomic-invalid-params
+kms_atomic@atomic_plane_damage
+kms_atomic_interruptible@legacy-setmode
+kms_atomic_interruptible@atomic-setmode
+kms_atomic_interruptible@legacy-dpms
+kms_atomic_interruptible@legacy-pageflip
+kms_atomic_interruptible@legacy-cursor
+kms_atomic_interruptible@universal-setplane-primary
+kms_atomic_interruptible@universal-setplane-cursor
+kms_atomic_transition@plane-primary-toggle-with-vblank-wait
+kms_atomic_transition@plane-all-transition
+kms_atomic_transition@plane-all-transition-fencing
+kms_atomic_transition@plane-all-transition-nonblocking
+kms_atomic_transition@plane-all-transition-nonblocking-fencing
+kms_atomic_transition@plane-use-after-nonblocking-unbind
+kms_atomic_transition@plane-use-after-nonblocking-unbind-fencing
+kms_atomic_transition@plane-all-modeset-transition
+kms_atomic_transition@plane-all-modeset-transition-fencing
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
+kms_atomic_transition@plane-all-modeset-transition-fencing-internal-panels
+kms_atomic_transition@plane-toggle-modeset-transition
+kms_atomic_transition@modeset-transition
+kms_atomic_transition@modeset-transition-fencing
+kms_atomic_transition@modeset-transition-nonblocking
+kms_atomic_transition@modeset-transition-nonblocking-fencing
+kms_big_fb@x-tiled-addfb-size-overflow
+kms_big_fb@y-tiled-addfb-size-overflow
+kms_big_fb@yf-tiled-addfb-size-overflow
+kms_big_fb@4-tiled-addfb-size-overflow
+kms_big_fb@x-tiled-addfb-size-offset-overflow
+kms_big_fb@y-tiled-addfb-size-offset-overflow
+kms_big_fb@yf-tiled-addfb-size-offset-overflow
+kms_big_fb@4-tiled-addfb-size-offset-overflow
+kms_big_fb@linear-addfb
+kms_big_fb@x-tiled-addfb
+kms_big_fb@y-tiled-addfb
+kms_big_fb@yf-tiled-addfb
+kms_big_fb@4-tiled-addfb
+kms_big_fb@linear-8bpp-rotate-0
+kms_big_fb@linear-8bpp-rotate-90
+kms_big_fb@linear-8bpp-rotate-180
+kms_big_fb@linear-8bpp-rotate-270
+kms_big_fb@linear-16bpp-rotate-0
+kms_big_fb@linear-16bpp-rotate-90
+kms_big_fb@linear-16bpp-rotate-180
+kms_big_fb@linear-16bpp-rotate-270
+kms_big_fb@linear-32bpp-rotate-0
+kms_big_fb@linear-32bpp-rotate-90
+kms_big_fb@linear-32bpp-rotate-180
+kms_big_fb@linear-32bpp-rotate-270
+kms_big_fb@linear-64bpp-rotate-0
+kms_big_fb@linear-64bpp-rotate-90
+kms_big_fb@linear-64bpp-rotate-180
+kms_big_fb@linear-64bpp-rotate-270
+kms_big_fb@x-tiled-8bpp-rotate-0
+kms_big_fb@x-tiled-8bpp-rotate-90
+kms_big_fb@x-tiled-8bpp-rotate-180
+kms_big_fb@x-tiled-8bpp-rotate-270
+kms_big_fb@x-tiled-16bpp-rotate-0
+kms_big_fb@x-tiled-16bpp-rotate-90
+kms_big_fb@x-tiled-16bpp-rotate-180
+kms_big_fb@x-tiled-16bpp-rotate-270
+kms_big_fb@x-tiled-32bpp-rotate-0
+kms_big_fb@x-tiled-32bpp-rotate-90
+kms_big_fb@x-tiled-32bpp-rotate-180
+kms_big_fb@x-tiled-32bpp-rotate-270
+kms_big_fb@x-tiled-64bpp-rotate-0
+kms_big_fb@x-tiled-64bpp-rotate-90
+kms_big_fb@x-tiled-64bpp-rotate-180
+kms_big_fb@x-tiled-64bpp-rotate-270
+kms_big_fb@y-tiled-8bpp-rotate-0
+kms_big_fb@y-tiled-8bpp-rotate-90
+kms_big_fb@y-tiled-8bpp-rotate-180
+kms_big_fb@y-tiled-8bpp-rotate-270
+kms_big_fb@y-tiled-16bpp-rotate-0
+kms_big_fb@y-tiled-16bpp-rotate-90
+kms_big_fb@y-tiled-16bpp-rotate-180
+kms_big_fb@y-tiled-16bpp-rotate-270
+kms_big_fb@y-tiled-32bpp-rotate-0
+kms_big_fb@y-tiled-32bpp-rotate-90
+kms_big_fb@y-tiled-32bpp-rotate-180
+kms_big_fb@y-tiled-32bpp-rotate-270
+kms_big_fb@y-tiled-64bpp-rotate-0
+kms_big_fb@y-tiled-64bpp-rotate-90
+kms_big_fb@y-tiled-64bpp-rotate-180
+kms_big_fb@y-tiled-64bpp-rotate-270
+kms_big_fb@yf-tiled-8bpp-rotate-0
+kms_big_fb@yf-tiled-8bpp-rotate-90
+kms_big_fb@yf-tiled-8bpp-rotate-180
+kms_big_fb@yf-tiled-8bpp-rotate-270
+kms_big_fb@yf-tiled-16bpp-rotate-0
+kms_big_fb@yf-tiled-16bpp-rotate-90
+kms_big_fb@yf-tiled-16bpp-rotate-180
+kms_big_fb@yf-tiled-16bpp-rotate-270
+kms_big_fb@yf-tiled-32bpp-rotate-0
+kms_big_fb@yf-tiled-32bpp-rotate-90
+kms_big_fb@yf-tiled-32bpp-rotate-180
+kms_big_fb@yf-tiled-32bpp-rotate-270
+kms_big_fb@yf-tiled-64bpp-rotate-0
+kms_big_fb@yf-tiled-64bpp-rotate-90
+kms_big_fb@yf-tiled-64bpp-rotate-180
+kms_big_fb@yf-tiled-64bpp-rotate-270
+kms_big_fb@4-tiled-8bpp-rotate-0
+kms_big_fb@4-tiled-8bpp-rotate-90
+kms_big_fb@4-tiled-8bpp-rotate-180
+kms_big_fb@4-tiled-8bpp-rotate-270
+kms_big_fb@4-tiled-16bpp-rotate-0
+kms_big_fb@4-tiled-16bpp-rotate-90
+kms_big_fb@4-tiled-16bpp-rotate-180
+kms_big_fb@4-tiled-16bpp-rotate-270
+kms_big_fb@4-tiled-32bpp-rotate-0
+kms_big_fb@4-tiled-32bpp-rotate-90
+kms_big_fb@4-tiled-32bpp-rotate-180
+kms_big_fb@4-tiled-32bpp-rotate-270
+kms_big_fb@4-tiled-64bpp-rotate-0
+kms_big_fb@4-tiled-64bpp-rotate-90
+kms_big_fb@4-tiled-64bpp-rotate-180
+kms_big_fb@4-tiled-64bpp-rotate-270
+kms_big_fb@linear-max-hw-stride-32bpp-rotate-0
+kms_big_fb@linear-max-hw-stride-32bpp-rotate-180
+kms_big_fb@linear-max-hw-stride-64bpp-rotate-0
+kms_big_fb@linear-max-hw-stride-64bpp-rotate-180
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0-async-flip
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-180
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-180-async-flip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-0
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-0-async-flip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180-async-flip
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0-hflip
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-180-hflip
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-180-hflip-async-flip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-0-hflip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180-hflip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180-hflip-async-flip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-0
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-0-async-flip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-async-flip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-async-flip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-180
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-180-async-flip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-0-hflip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-hflip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-hflip-async-flip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-hflip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-180-hflip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-180-hflip-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-0
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-0-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180-hflip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180-hflip-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-0-hflip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-hflip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-hflip-async-flip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0-async-flip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-180
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-180-async-flip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-async-flip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-async-flip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0-hflip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-180-hflip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-180-hflip-async-flip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-hflip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-hflip-async-flip
+kms_big_joiner@basic
+kms_big_joiner@invalid-modeset
+kms_big_joiner@2x-modeset
+kms_busy@basic
+kms_busy@basic-hang
+kms_busy@extended-pageflip-modeset-hang-oldfb
+kms_busy@extended-pageflip-hang-oldfb
+kms_busy@extended-pageflip-hang-newfb
+kms_busy@extended-modeset-hang-oldfb
+kms_busy@extended-modeset-hang-newfb
+kms_busy@extended-modeset-hang-oldfb-with-reset
+kms_busy@extended-modeset-hang-newfb-with-reset
+kms_bw@linear-tiling-1-displays-1920x1080p
+kms_bw@linear-tiling-1-displays-2560x1440p
+kms_bw@linear-tiling-1-displays-3840x2160p
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-2560x1440p
+kms_bw@linear-tiling-2-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_bw@linear-tiling-4-displays-1920x1080p
+kms_bw@linear-tiling-4-displays-2560x1440p
+kms_bw@linear-tiling-4-displays-3840x2160p
+kms_bw@linear-tiling-5-displays-1920x1080p
+kms_bw@linear-tiling-5-displays-2560x1440p
+kms_bw@linear-tiling-5-displays-3840x2160p
+kms_bw@linear-tiling-6-displays-1920x1080p
+kms_bw@linear-tiling-6-displays-2560x1440p
+kms_bw@linear-tiling-6-displays-3840x2160p
+kms_bw@linear-tiling-7-displays-1920x1080p
+kms_bw@linear-tiling-7-displays-2560x1440p
+kms_bw@linear-tiling-7-displays-3840x2160p
+kms_bw@linear-tiling-8-displays-1920x1080p
+kms_bw@linear-tiling-8-displays-2560x1440p
+kms_bw@linear-tiling-8-displays-3840x2160p
+kms_ccs@pipe-A-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-A-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-A-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-A-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-A-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-A-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-B-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-B-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-B-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-B-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-B-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-C-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-C-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-C-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-C-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-C-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-D-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-D-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-D-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-D-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-D-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-E-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-E-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-E-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-E-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-E-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-F-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-F-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-F-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-F-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-F-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-G-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-G-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-G-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-G-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-G-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-H-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-H-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-H-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-H-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-H-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_cdclk@plane-scaling
+kms_cdclk@mode-transition
+kms_cdclk@mode-transition-all-outputs
+kms_color@degamma
+kms_color@gamma
+kms_color@legacy-gamma
+kms_color@legacy-gamma-reset
+kms_color@ctm-red-to-blue
+kms_color@ctm-green-to-red
+kms_color@ctm-blue-to-red
+kms_color@ctm-max
+kms_color@ctm-negative
+kms_color@ctm-0-25
+kms_color@ctm-0-50
+kms_color@ctm-0-75
+kms_color@ctm-signed
+kms_color@deep-color
+kms_color@invalid-gamma-lut-sizes
+kms_color@invalid-degamma-lut-sizes
+kms_color@invalid-ctm-matrix-sizes
+kms_concurrent@pipe-A
+kms_concurrent@pipe-B
+kms_concurrent@pipe-C
+kms_concurrent@pipe-D
+kms_concurrent@pipe-E
+kms_concurrent@pipe-F
+kms_concurrent@pipe-G
+kms_concurrent@pipe-H
+kms_content_protection@legacy
+kms_content_protection@atomic
+kms_content_protection@atomic-dpms
+kms_content_protection@LIC
+kms_content_protection@type1
+kms_content_protection@mei_interface
+kms_content_protection@content_type_change
+kms_content_protection@uevent
+kms_content_protection@srm
+kms_content_protection@dp-mst-type-0
+kms_content_protection@dp-mst-lic-type-0
+kms_content_protection@dp-mst-type-1
+kms_content_protection@dp-mst-lic-type-1
+kms_cursor_crc@cursor-size-change
+kms_cursor_crc@cursor-alpha-opaque
+kms_cursor_crc@cursor-alpha-transparent
+kms_cursor_crc@cursor-dpms
+kms_cursor_crc@cursor-suspend
+kms_cursor_crc@cursor-onscreen-32x32
+kms_cursor_crc@cursor-offscreen-32x32
+kms_cursor_crc@cursor-sliding-32x32
+kms_cursor_crc@cursor-random-32x32
+kms_cursor_crc@cursor-rapid-movement-32x32
+kms_cursor_crc@cursor-onscreen-32x10
+kms_cursor_crc@cursor-offscreen-32x10
+kms_cursor_crc@cursor-sliding-32x10
+kms_cursor_crc@cursor-random-32x10
+kms_cursor_crc@cursor-rapid-movement-32x10
+kms_cursor_crc@cursor-onscreen-64x64
+kms_cursor_crc@cursor-offscreen-64x64
+kms_cursor_crc@cursor-sliding-64x64
+kms_cursor_crc@cursor-random-64x64
+kms_cursor_crc@cursor-rapid-movement-64x64
+kms_cursor_crc@cursor-onscreen-64x21
+kms_cursor_crc@cursor-offscreen-64x21
+kms_cursor_crc@cursor-sliding-64x21
+kms_cursor_crc@cursor-random-64x21
+kms_cursor_crc@cursor-rapid-movement-64x21
+kms_cursor_crc@cursor-onscreen-128x128
+kms_cursor_crc@cursor-offscreen-128x128
+kms_cursor_crc@cursor-sliding-128x128
+kms_cursor_crc@cursor-random-128x128
+kms_cursor_crc@cursor-rapid-movement-128x128
+kms_cursor_crc@cursor-onscreen-128x42
+kms_cursor_crc@cursor-offscreen-128x42
+kms_cursor_crc@cursor-sliding-128x42
+kms_cursor_crc@cursor-random-128x42
+kms_cursor_crc@cursor-rapid-movement-128x42
+kms_cursor_crc@cursor-onscreen-256x256
+kms_cursor_crc@cursor-offscreen-256x256
+kms_cursor_crc@cursor-sliding-256x256
+kms_cursor_crc@cursor-random-256x256
+kms_cursor_crc@cursor-rapid-movement-256x256
+kms_cursor_crc@cursor-onscreen-256x85
+kms_cursor_crc@cursor-offscreen-256x85
+kms_cursor_crc@cursor-sliding-256x85
+kms_cursor_crc@cursor-random-256x85
+kms_cursor_crc@cursor-rapid-movement-256x85
+kms_cursor_crc@cursor-onscreen-512x512
+kms_cursor_crc@cursor-offscreen-512x512
+kms_cursor_crc@cursor-sliding-512x512
+kms_cursor_crc@cursor-random-512x512
+kms_cursor_crc@cursor-rapid-movement-512x512
+kms_cursor_crc@cursor-onscreen-512x170
+kms_cursor_crc@cursor-offscreen-512x170
+kms_cursor_crc@cursor-sliding-512x170
+kms_cursor_crc@cursor-random-512x170
+kms_cursor_crc@cursor-rapid-movement-512x170
+kms_cursor_crc@cursor-onscreen-max-size
+kms_cursor_crc@cursor-offscreen-max-size
+kms_cursor_crc@cursor-sliding-max-size
+kms_cursor_crc@cursor-random-max-size
+kms_cursor_crc@cursor-rapid-movement-max-size
+kms_cursor_legacy@single-bo
+kms_cursor_legacy@single-move
+kms_cursor_legacy@forked-bo
+kms_cursor_legacy@forked-move
+kms_cursor_legacy@torture-bo
+kms_cursor_legacy@torture-move
+kms_cursor_legacy@nonblocking-modeset-vs-cursor-atomic
+kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic
+kms_cursor_legacy@2x-flip-vs-cursor-legacy
+kms_cursor_legacy@2x-flip-vs-cursor-atomic
+kms_cursor_legacy@2x-long-flip-vs-cursor-legacy
+kms_cursor_legacy@2x-long-flip-vs-cursor-atomic
+kms_cursor_legacy@2x-nonblocking-modeset-vs-cursor-atomic
+kms_cursor_legacy@2x-long-nonblocking-modeset-vs-cursor-atomic
+kms_cursor_legacy@2x-cursor-vs-flip-legacy
+kms_cursor_legacy@2x-long-cursor-vs-flip-legacy
+kms_cursor_legacy@2x-cursor-vs-flip-atomic
+kms_cursor_legacy@2x-long-cursor-vs-flip-atomic
+kms_cursor_legacy@flip-vs-cursor-crc-legacy
+kms_cursor_legacy@flip-vs-cursor-crc-atomic
+kms_cursor_legacy@flip-vs-cursor-busy-crc-legacy
+kms_cursor_legacy@flip-vs-cursor-busy-crc-atomic
+kms_cursor_legacy@basic-flip-before-cursor-legacy
+kms_cursor_legacy@basic-busy-flip-before-cursor-legacy
+kms_cursor_legacy@basic-flip-after-cursor-legacy
+kms_cursor_legacy@basic-flip-before-cursor-varying-size
+kms_cursor_legacy@basic-busy-flip-before-cursor-varying-size
+kms_cursor_legacy@basic-flip-after-cursor-varying-size
+kms_cursor_legacy@short-flip-before-cursor-toggle
+kms_cursor_legacy@short-busy-flip-before-cursor-toggle
+kms_cursor_legacy@short-flip-after-cursor-toggle
+kms_cursor_legacy@basic-flip-before-cursor-atomic
+kms_cursor_legacy@basic-busy-flip-before-cursor-atomic
+kms_cursor_legacy@basic-flip-after-cursor-atomic
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
+kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
+kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions-varying-size
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
+kms_cursor_legacy@cursor-vs-flip-legacy
+kms_cursor_legacy@flip-vs-cursor-legacy
+kms_cursor_legacy@cursorA-vs-flipA-legacy
+kms_cursor_legacy@cursorA-vs-flipB-legacy
+kms_cursor_legacy@cursorB-vs-flipA-legacy
+kms_cursor_legacy@cursorB-vs-flipB-legacy
+kms_cursor_legacy@cursor-vs-flip-varying-size
+kms_cursor_legacy@flip-vs-cursor-varying-size
+kms_cursor_legacy@cursorA-vs-flipA-varying-size
+kms_cursor_legacy@cursorA-vs-flipB-varying-size
+kms_cursor_legacy@cursorB-vs-flipA-varying-size
+kms_cursor_legacy@cursorB-vs-flipB-varying-size
+kms_cursor_legacy@cursor-vs-flip-toggle
+kms_cursor_legacy@flip-vs-cursor-toggle
+kms_cursor_legacy@cursorA-vs-flipA-toggle
+kms_cursor_legacy@cursorA-vs-flipB-toggle
+kms_cursor_legacy@cursorB-vs-flipA-toggle
+kms_cursor_legacy@cursorB-vs-flipB-toggle
+kms_cursor_legacy@cursor-vs-flip-atomic
+kms_cursor_legacy@flip-vs-cursor-atomic
+kms_cursor_legacy@cursorA-vs-flipA-atomic
+kms_cursor_legacy@cursorA-vs-flipB-atomic
+kms_cursor_legacy@cursorB-vs-flipA-atomic
+kms_cursor_legacy@cursorB-vs-flipB-atomic
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions
+kms_cursor_legacy@flip-vs-cursor-atomic-transitions
+kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions
+kms_cursor_legacy@cursorA-vs-flipB-atomic-transitions
+kms_cursor_legacy@cursorB-vs-flipA-atomic-transitions
+kms_cursor_legacy@cursorB-vs-flipB-atomic-transitions
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size
+kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size
+kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions-varying-size
+kms_cursor_legacy@cursorA-vs-flipB-atomic-transitions-varying-size
+kms_cursor_legacy@cursorB-vs-flipA-atomic-transitions-varying-size
+kms_cursor_legacy@cursorB-vs-flipB-atomic-transitions-varying-size
+kms_dither@FB-8BPC-Vs-Panel-6BPC
+kms_dither@FB-8BPC-Vs-Panel-8BPC
+kms_dp_aux_dev
+kms_tiled_display@basic-test-pattern
+kms_tiled_display@basic-test-pattern-with-chamelium
+kms_draw_crc@draw-method-mmap-cpu
+kms_draw_crc@draw-method-mmap-gtt
+kms_draw_crc@draw-method-mmap-wc
+kms_draw_crc@draw-method-pwrite
+kms_draw_crc@draw-method-blt
+kms_draw_crc@draw-method-render
+kms_draw_crc@fill-fb
+kms_dsc@dsc-basic
+kms_dsc@dsc-with-formats
+kms_dsc@dsc-with-bpc
+kms_dsc@dsc-with-bpc-formats
+kms_dsc@dsc-with-output-formats
+kms_fbcon_fbt@fbc
+kms_fbcon_fbt@psr
+kms_fbcon_fbt@fbc-suspend
+kms_fbcon_fbt@psr-suspend
+kms_fence_pin_leak
+kms_flip@nonblocking-read
+kms_flip@wf_vblank-ts-check
+kms_flip@2x-wf_vblank-ts-check
+kms_flip@blocking-wf_vblank
+kms_flip@2x-blocking-wf_vblank
+kms_flip@absolute-wf_vblank
+kms_flip@2x-absolute-wf_vblank
+kms_flip@blocking-absolute-wf_vblank
+kms_flip@2x-blocking-absolute-wf_vblank
+kms_flip@basic-plain-flip
+kms_flip@2x-plain-flip
+kms_flip@busy-flip
+kms_flip@2x-busy-flip
+kms_flip@flip-vs-fences
+kms_flip@2x-flip-vs-fences
+kms_flip@plain-flip-ts-check
+kms_flip@2x-plain-flip-ts-check
+kms_flip@plain-flip-fb-recreate
+kms_flip@2x-plain-flip-fb-recreate
+kms_flip@flip-vs-rmfb
+kms_flip@2x-flip-vs-rmfb
+kms_flip@basic-flip-vs-dpms
+kms_flip@2x-flip-vs-dpms
+kms_flip@flip-vs-panning
+kms_flip@2x-flip-vs-panning
+kms_flip@basic-flip-vs-modeset
+kms_flip@2x-flip-vs-modeset
+kms_flip@flip-vs-expired-vblank
+kms_flip@2x-flip-vs-expired-vblank
+kms_flip@flip-vs-absolute-wf_vblank
+kms_flip@2x-flip-vs-absolute-wf_vblank
+kms_flip@basic-flip-vs-wf_vblank
+kms_flip@2x-flip-vs-wf_vblank
+kms_flip@flip-vs-blocking-wf-vblank
+kms_flip@2x-flip-vs-blocking-wf-vblank
+kms_flip@flip-vs-modeset-vs-hang
+kms_flip@2x-flip-vs-modeset-vs-hang
+kms_flip@flip-vs-panning-vs-hang
+kms_flip@2x-flip-vs-panning-vs-hang
+kms_flip@flip-vs-dpms-off-vs-modeset
+kms_flip@2x-flip-vs-dpms-off-vs-modeset
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset
+kms_flip@2x-single-buffer-flip-vs-dpms-off-vs-modeset
+kms_flip@dpms-off-confusion
+kms_flip@nonexisting-fb
+kms_flip@2x-nonexisting-fb
+kms_flip@dpms-vs-vblank-race
+kms_flip@2x-dpms-vs-vblank-race
+kms_flip@modeset-vs-vblank-race
+kms_flip@2x-modeset-vs-vblank-race
+kms_flip@bo-too-big
+kms_flip@flip-vs-suspend
+kms_flip@2x-flip-vs-suspend
+kms_flip@wf_vblank-ts-check-interruptible
+kms_flip@2x-wf_vblank-ts-check-interruptible
+kms_flip@absolute-wf_vblank-interruptible
+kms_flip@2x-absolute-wf_vblank-interruptible
+kms_flip@blocking-absolute-wf_vblank-interruptible
+kms_flip@2x-blocking-absolute-wf_vblank-interruptible
+kms_flip@plain-flip-interruptible
+kms_flip@2x-plain-flip-interruptible
+kms_flip@flip-vs-fences-interruptible
+kms_flip@2x-flip-vs-fences-interruptible
+kms_flip@plain-flip-ts-check-interruptible
+kms_flip@2x-plain-flip-ts-check-interruptible
+kms_flip@plain-flip-fb-recreate-interruptible
+kms_flip@2x-plain-flip-fb-recreate-interruptible
+kms_flip@flip-vs-rmfb-interruptible
+kms_flip@2x-flip-vs-rmfb-interruptible
+kms_flip@flip-vs-panning-interruptible
+kms_flip@2x-flip-vs-panning-interruptible
+kms_flip@flip-vs-expired-vblank-interruptible
+kms_flip@2x-flip-vs-expired-vblank-interruptible
+kms_flip@flip-vs-absolute-wf_vblank-interruptible
+kms_flip@2x-flip-vs-absolute-wf_vblank-interruptible
+kms_flip@flip-vs-wf_vblank-interruptible
+kms_flip@2x-flip-vs-wf_vblank-interruptible
+kms_flip@flip-vs-dpms-off-vs-modeset-interruptible
+kms_flip@2x-flip-vs-dpms-off-vs-modeset-interruptible
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible
+kms_flip@2x-single-buffer-flip-vs-dpms-off-vs-modeset-interruptible
+kms_flip@dpms-off-confusion-interruptible
+kms_flip@nonexisting-fb-interruptible
+kms_flip@2x-nonexisting-fb-interruptible
+kms_flip@dpms-vs-vblank-race-interruptible
+kms_flip@2x-dpms-vs-vblank-race-interruptible
+kms_flip@modeset-vs-vblank-race-interruptible
+kms_flip@2x-modeset-vs-vblank-race-interruptible
+kms_flip@bo-too-big-interruptible
+kms_flip@flip-vs-suspend-interruptible
+kms_flip@2x-flip-vs-suspend-interruptible
+kms_flip_event_leak@basic
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-yftile-to-64bpp-yftile-downscaling
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling
+kms_flip_scaled_crc@flip-32bpp-4tile-to-64bpp-4tile-downscaling
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-yftile-to-32bpp-yftile-downscaling
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tile-downscaling
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-downscaling
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-16bpp-4tile-downscaling
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-yftileccs-to-64bpp-yftile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytilegen12rcccs-downscaling
+kms_flip_scaled_crc@flip-32bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-downscaling
+kms_flip_scaled_crc@flip-32bpp-yftile-to-32bpp-yftileccs-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-32bpp-yftile-to-64bpp-yftile-upscaling
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling
+kms_flip_scaled_crc@flip-32bpp-4tile-to-64bpp-4tile-upscaling
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-yftile-to-32bpp-yftile-upscaling
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tile-upscaling
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-upscaling
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-16bpp-4tile-upscaling
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-32bpp-yftileccs-to-64bpp-yftile-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling
+kms_flip_scaled_crc@flip-32bpp-4tile-to-32bpp-4tiledg2rcccs-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-upscaling
+kms_flip_scaled_crc@flip-32bpp-yftile-to-32bpp-yftileccs-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-upscaling
+kms_force_connector_basic@force-load-detect
+kms_force_connector_basic@force-connector-state
+kms_force_connector_basic@force-edid
+kms_force_connector_basic@prune-stale-modes
+kms_frontbuffer_tracking@fbc-1p-rte
+kms_frontbuffer_tracking@fbc-2p-rte
+kms_frontbuffer_tracking@psr-1p-rte
+kms_frontbuffer_tracking@psr-2p-rte
+kms_frontbuffer_tracking@fbcpsr-1p-rte
+kms_frontbuffer_tracking@fbcpsr-2p-rte
+kms_frontbuffer_tracking@drrs-1p-rte
+kms_frontbuffer_tracking@drrs-2p-rte
+kms_frontbuffer_tracking@fbcdrrs-1p-rte
+kms_frontbuffer_tracking@fbcdrrs-2p-rte
+kms_frontbuffer_tracking@psrdrrs-1p-rte
+kms_frontbuffer_tracking@psrdrrs-2p-rte
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-rte
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-rte
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbc-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@psr-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@psr-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcpsr-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcpsr-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@drrs-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@drrs-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcdrrs-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcdrrs-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@psrdrrs-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@psrdrrs-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbc-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@psr-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@drrs-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite
+kms_frontbuffer_tracking@fbc-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@fbc-rgb565-draw-blt
+kms_frontbuffer_tracking@fbc-rgb101010-draw-blt
+kms_frontbuffer_tracking@fbc-rgb565-draw-render
+kms_frontbuffer_tracking@fbc-rgb101010-draw-render
+kms_frontbuffer_tracking@psr-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@psr-rgb565-draw-pwrite
+kms_frontbuffer_tracking@psr-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@psr-rgb565-draw-blt
+kms_frontbuffer_tracking@psr-rgb101010-draw-blt
+kms_frontbuffer_tracking@psr-rgb565-draw-render
+kms_frontbuffer_tracking@psr-rgb101010-draw-render
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-blt
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-blt
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-render
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-render
+kms_frontbuffer_tracking@drrs-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-rgb565-draw-pwrite
+kms_frontbuffer_tracking@drrs-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@drrs-rgb565-draw-blt
+kms_frontbuffer_tracking@drrs-rgb101010-draw-blt
+kms_frontbuffer_tracking@drrs-rgb565-draw-render
+kms_frontbuffer_tracking@drrs-rgb101010-draw-render
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-render
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-render
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-blt
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-blt
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-render
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-render
+kms_frontbuffer_tracking@fbc-indfb-scaledprimary
+kms_frontbuffer_tracking@fbc-shrfb-scaledprimary
+kms_frontbuffer_tracking@psr-indfb-scaledprimary
+kms_frontbuffer_tracking@psr-shrfb-scaledprimary
+kms_frontbuffer_tracking@fbcpsr-indfb-scaledprimary
+kms_frontbuffer_tracking@fbcpsr-shrfb-scaledprimary
+kms_frontbuffer_tracking@drrs-indfb-scaledprimary
+kms_frontbuffer_tracking@drrs-shrfb-scaledprimary
+kms_frontbuffer_tracking@fbcdrrs-indfb-scaledprimary
+kms_frontbuffer_tracking@fbcdrrs-shrfb-scaledprimary
+kms_frontbuffer_tracking@psrdrrs-indfb-scaledprimary
+kms_frontbuffer_tracking@psrdrrs-shrfb-scaledprimary
+kms_frontbuffer_tracking@fbcpsrdrrs-indfb-scaledprimary
+kms_frontbuffer_tracking@fbcpsrdrrs-shrfb-scaledprimary
+kms_frontbuffer_tracking@fbc-modesetfrombusy
+kms_frontbuffer_tracking@fbc-badstride
+kms_frontbuffer_tracking@fbc-stridechange
+kms_frontbuffer_tracking@fbc-tiling-linear
+kms_frontbuffer_tracking@fbc-tiling-y
+kms_frontbuffer_tracking@fbc-tiling-4
+kms_frontbuffer_tracking@fbc-suspend
+kms_frontbuffer_tracking@psr-modesetfrombusy
+kms_frontbuffer_tracking@psr-slowdraw
+kms_frontbuffer_tracking@psr-suspend
+kms_frontbuffer_tracking@fbcpsr-modesetfrombusy
+kms_frontbuffer_tracking@fbcpsr-badstride
+kms_frontbuffer_tracking@fbcpsr-stridechange
+kms_frontbuffer_tracking@fbcpsr-tiling-linear
+kms_frontbuffer_tracking@fbcpsr-tiling-y
+kms_frontbuffer_tracking@fbcpsr-tiling-4
+kms_frontbuffer_tracking@fbcpsr-slowdraw
+kms_frontbuffer_tracking@fbcpsr-suspend
+kms_frontbuffer_tracking@drrs-modesetfrombusy
+kms_frontbuffer_tracking@drrs-slowdraw
+kms_frontbuffer_tracking@drrs-suspend
+kms_frontbuffer_tracking@fbcdrrs-modesetfrombusy
+kms_frontbuffer_tracking@fbcdrrs-badstride
+kms_frontbuffer_tracking@fbcdrrs-stridechange
+kms_frontbuffer_tracking@fbcdrrs-tiling-linear
+kms_frontbuffer_tracking@fbcdrrs-tiling-y
+kms_frontbuffer_tracking@fbcdrrs-tiling-4
+kms_frontbuffer_tracking@fbcdrrs-slowdraw
+kms_frontbuffer_tracking@fbcdrrs-suspend
+kms_frontbuffer_tracking@psrdrrs-modesetfrombusy
+kms_frontbuffer_tracking@psrdrrs-slowdraw
+kms_frontbuffer_tracking@psrdrrs-suspend
+kms_frontbuffer_tracking@fbcpsrdrrs-modesetfrombusy
+kms_frontbuffer_tracking@fbcpsrdrrs-badstride
+kms_frontbuffer_tracking@fbcpsrdrrs-stridechange
+kms_frontbuffer_tracking@fbcpsrdrrs-tiling-linear
+kms_frontbuffer_tracking@fbcpsrdrrs-tiling-y
+kms_frontbuffer_tracking@fbcpsrdrrs-tiling-4
+kms_frontbuffer_tracking@fbcpsrdrrs-slowdraw
+kms_frontbuffer_tracking@fbcpsrdrrs-suspend
+kms_frontbuffer_tracking@basic
+kms_getfb@getfb-handle-zero
+kms_getfb@getfb-handle-valid
+kms_getfb@getfb-handle-closed
+kms_getfb@getfb-handle-not-fb
+kms_getfb@getfb-addfb-different-handles
+kms_getfb@getfb-repeated-different-handles
+kms_getfb@getfb-reject-ccs
+kms_getfb@getfb2-handle-zero
+kms_getfb@getfb2-handle-closed
+kms_getfb@getfb2-handle-not-fb
+kms_getfb@getfb2-accept-ccs
+kms_getfb@getfb2-into-addfb2
+kms_getfb@getfb-handle-protection
+kms_getfb@getfb2-handle-protection
+kms_hdmi_inject@inject-4k
+kms_hdmi_inject@inject-audio
+kms_hdr@bpc-switch
+kms_hdr@bpc-switch-dpms
+kms_hdr@bpc-switch-suspend
+kms_hdr@static-toggle
+kms_hdr@static-toggle-dpms
+kms_hdr@static-toggle-suspend
+kms_hdr@static-swap
+kms_hdr@invalid-metadata-sizes
+kms_hdr@invalid-hdr
+kms_invalid_mode@clock-too-high
+kms_invalid_mode@zero-clock
+kms_invalid_mode@int-max-clock
+kms_invalid_mode@uint-max-clock
+kms_invalid_mode@zero-hdisplay
+kms_invalid_mode@zero-vdisplay
+kms_invalid_mode@bad-hsync-start
+kms_invalid_mode@bad-vsync-start
+kms_invalid_mode@bad-hsync-end
+kms_invalid_mode@bad-vsync-end
+kms_invalid_mode@bad-htotal
+kms_invalid_mode@bad-vtotal
+kms_legacy_colorkey@basic
+kms_legacy_colorkey@invalid-plane
+kms_multipipe_modeset@basic-max-pipe-crc-check
+kms_panel_fitting@legacy
+kms_panel_fitting@atomic-fastset
+kms_pipe_b_c_ivb@pipe-B-dpms-off-modeset-pipe-C
+kms_pipe_b_c_ivb@pipe-B-double-modeset-then-modeset-pipe-C
+kms_pipe_b_c_ivb@disable-pipe-B-enable-pipe-C
+kms_pipe_b_c_ivb@from-pipe-C-to-B-with-3-lanes
+kms_pipe_b_c_ivb@enable-pipe-C-while-B-has-3-lanes
+kms_pipe_crc_basic@bad-source
+kms_pipe_crc_basic@read-crc
+kms_pipe_crc_basic@read-crc-frame-sequence
+kms_pipe_crc_basic@nonblocking-crc
+kms_pipe_crc_basic@nonblocking-crc-frame-sequence
+kms_pipe_crc_basic@suspend-read-crc
+kms_pipe_crc_basic@hang-read-crc
+kms_pipe_crc_basic@disable-crc-after-crtc
+kms_pipe_crc_basic@compare-crc-sanitycheck-xr24
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12
+kms_plane@pixel-format
+kms_plane@pixel-format-source-clamping
+kms_plane@plane-position-covered
+kms_plane@plane-position-hole
+kms_plane@plane-position-hole-dpms
+kms_plane@plane-panning-top-left
+kms_plane@plane-panning-bottom-right
+kms_plane@plane-panning-bottom-right-suspend
+kms_plane@invalid-pixel-format-settings
+kms_plane_alpha_blend@alpha-basic
+kms_plane_alpha_blend@alpha-7efc
+kms_plane_alpha_blend@coverage-7efc
+kms_plane_alpha_blend@coverage-vs-premult-vs-constant
+kms_plane_alpha_blend@alpha-transparent-fb
+kms_plane_alpha_blend@alpha-opaque-fb
+kms_plane_alpha_blend@constant-alpha-min
+kms_plane_alpha_blend@constant-alpha-mid
+kms_plane_alpha_blend@constant-alpha-max
+kms_plane_cursor@primary
+kms_plane_cursor@overlay
+kms_plane_cursor@viewport
+kms_plane_lowres@tiling-none
+kms_plane_lowres@tiling-x
+kms_plane_lowres@tiling-y
+kms_plane_lowres@tiling-yf
+kms_plane_lowres@tiling-4
+kms_plane_multiple@tiling-none
+kms_plane_multiple@tiling-x
+kms_plane_multiple@tiling-y
+kms_plane_multiple@tiling-yf
+kms_plane_multiple@tiling-4
+kms_plane_scaling@plane-upscale-with-pixel-format-20x20
+kms_plane_scaling@plane-upscale-with-pixel-format-factor-0-25
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-25
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-5
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-75
+kms_plane_scaling@plane-scaler-with-pixel-format-unity-scaling
+kms_plane_scaling@plane-upscale-with-rotation-20x20
+kms_plane_scaling@plane-upscale-with-rotation-factor-0-25
+kms_plane_scaling@plane-downscale-with-rotation-factor-0-25
+kms_plane_scaling@plane-downscale-with-rotation-factor-0-5
+kms_plane_scaling@plane-downscale-with-rotation-factor-0-75
+kms_plane_scaling@plane-scaler-with-rotation-unity-scaling
+kms_plane_scaling@plane-upscale-with-modifiers-20x20
+kms_plane_scaling@plane-upscale-with-modifiers-factor-0-25
+kms_plane_scaling@plane-downscale-with-modifiers-factor-0-25
+kms_plane_scaling@plane-downscale-with-modifiers-factor-0-5
+kms_plane_scaling@plane-downscale-with-modifiers-factor-0-75
+kms_plane_scaling@plane-scaler-with-modifiers-unity-scaling
+kms_plane_scaling@plane-scaler-with-clipping-clamping-pixel-formats
+kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation
+kms_plane_scaling@plane-scaler-with-clipping-clamping-modifiers
+kms_plane_scaling@planes-upscale-20x20
+kms_plane_scaling@planes-upscale-factor-0-25
+kms_plane_scaling@planes-scaler-unity-scaling
+kms_plane_scaling@planes-downscale-factor-0-25
+kms_plane_scaling@planes-downscale-factor-0-5
+kms_plane_scaling@planes-downscale-factor-0-75
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-75
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-25
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-75
+kms_plane_scaling@planes-unity-scaling-downscale-factor-0-25
+kms_plane_scaling@planes-unity-scaling-downscale-factor-0-5
+kms_plane_scaling@planes-unity-scaling-downscale-factor-0-75
+kms_plane_scaling@planes-downscale-factor-0-25-upscale-20x20
+kms_plane_scaling@planes-downscale-factor-0-25-upscale-factor-0-25
+kms_plane_scaling@planes-downscale-factor-0-25-unity-scaling
+kms_plane_scaling@planes-downscale-factor-0-5-upscale-20x20
+kms_plane_scaling@planes-downscale-factor-0-5-upscale-factor-0-25
+kms_plane_scaling@planes-downscale-factor-0-5-unity-scaling
+kms_plane_scaling@planes-downscale-factor-0-75-upscale-20x20
+kms_plane_scaling@planes-downscale-factor-0-75-upscale-factor-0-25
+kms_plane_scaling@planes-downscale-factor-0-75-unity-scaling
+kms_plane_scaling@intel-max-src-size
+kms_plane_scaling@invalid-num-scalers
+kms_plane_scaling@invalid-parameters
+kms_plane_scaling@2x-scaler-multi-pipe
+kms_prime@basic-crc-hybrid
+kms_prime@basic-modeset-hybrid
+kms_prime@D3hot
+kms_prime@basic-crc-vgem
+kms_prop_blob@basic
+kms_prop_blob@blob-prop-core
+kms_prop_blob@blob-prop-validate
+kms_prop_blob@blob-prop-lifetime
+kms_prop_blob@blob-multiple
+kms_prop_blob@invalid-get-prop-any
+kms_prop_blob@invalid-get-prop
+kms_prop_blob@invalid-set-prop-any
+kms_prop_blob@invalid-set-prop
+kms_properties@plane-properties-legacy
+kms_properties@plane-properties-atomic
+kms_properties@crtc-properties-legacy
+kms_properties@crtc-properties-atomic
+kms_properties@connector-properties-legacy
+kms_properties@connector-properties-atomic
+kms_properties@invalid-properties-legacy
+kms_properties@invalid-properties-atomic
+kms_properties@get_properties-sanity-atomic
+kms_properties@get_properties-sanity-non-atomic
+kms_psr@basic
+kms_psr@no_drrs
+kms_psr@primary_page_flip
+kms_psr@primary_mmap_gtt
+kms_psr@primary_mmap_cpu
+kms_psr@primary_blt
+kms_psr@primary_render
+kms_psr@sprite_mmap_gtt
+kms_psr@cursor_mmap_gtt
+kms_psr@sprite_mmap_cpu
+kms_psr@cursor_mmap_cpu
+kms_psr@sprite_blt
+kms_psr@cursor_blt
+kms_psr@sprite_render
+kms_psr@cursor_render
+kms_psr@sprite_plane_move
+kms_psr@cursor_plane_move
+kms_psr@sprite_plane_onoff
+kms_psr@cursor_plane_onoff
+kms_psr@dpms
+kms_psr@suspend
+kms_psr@psr2_basic
+kms_psr@psr2_no_drrs
+kms_psr@psr2_primary_page_flip
+kms_psr@psr2_primary_mmap_gtt
+kms_psr@psr2_primary_mmap_cpu
+kms_psr@psr2_primary_blt
+kms_psr@psr2_primary_render
+kms_psr@psr2_sprite_mmap_gtt
+kms_psr@psr2_cursor_mmap_gtt
+kms_psr@psr2_sprite_mmap_cpu
+kms_psr@psr2_cursor_mmap_cpu
+kms_psr@psr2_sprite_blt
+kms_psr@psr2_cursor_blt
+kms_psr@psr2_sprite_render
+kms_psr@psr2_cursor_render
+kms_psr@psr2_sprite_plane_move
+kms_psr@psr2_cursor_plane_move
+kms_psr@psr2_sprite_plane_onoff
+kms_psr@psr2_cursor_plane_onoff
+kms_psr@psr2_dpms
+kms_psr@psr2_suspend
+kms_psr2_sf@primary-plane-update-sf-dmg-area
+kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb
+kms_psr2_sf@overlay-plane-update-sf-dmg-area
+kms_psr2_sf@cursor-plane-update-sf
+kms_psr2_sf@cursor-plane-move-continuous-sf
+kms_psr2_sf@cursor-plane-move-continuous-exceed-sf
+kms_psr2_sf@cursor-plane-move-continuous-exceed-fully-sf
+kms_psr2_sf@plane-move-sf-dmg-area
+kms_psr2_sf@overlay-plane-move-continuous-sf
+kms_psr2_sf@overlay-plane-move-continuous-exceed-sf
+kms_psr2_sf@overlay-plane-move-continuous-exceed-fully-sf
+kms_psr2_sf@overlay-primary-update-sf-dmg-area
+kms_psr2_sf@overlay-plane-update-continuous-sf
+kms_psr2_su@page_flip-XRGB8888
+kms_psr2_su@page_flip-NV12
+kms_psr2_su@page_flip-P010
+kms_psr2_su@frontbuffer-XRGB8888
+kms_pwrite_crc
+kms_rmfb@rmfb-ioctl
+kms_rmfb@close-fd
+kms_rotation_crc@primary-rotation-90
+kms_rotation_crc@primary-rotation-180
+kms_rotation_crc@primary-rotation-270
+kms_rotation_crc@sprite-rotation-90
+kms_rotation_crc@sprite-rotation-180
+kms_rotation_crc@sprite-rotation-270
+kms_rotation_crc@cursor-rotation-180
+kms_rotation_crc@sprite-rotation-90-pos-100-0
+kms_rotation_crc@bad-pixel-format
+kms_rotation_crc@bad-tiling
+kms_rotation_crc@primary-x-tiled-reflect-x-0
+kms_rotation_crc@primary-x-tiled-reflect-x-180
+kms_rotation_crc@primary-y-tiled-reflect-x-0
+kms_rotation_crc@primary-y-tiled-reflect-x-90
+kms_rotation_crc@primary-y-tiled-reflect-x-180
+kms_rotation_crc@primary-y-tiled-reflect-x-270
+kms_rotation_crc@primary-yf-tiled-reflect-x-0
+kms_rotation_crc@primary-yf-tiled-reflect-x-90
+kms_rotation_crc@primary-yf-tiled-reflect-x-180
+kms_rotation_crc@primary-yf-tiled-reflect-x-270
+kms_rotation_crc@primary-4-tiled-reflect-x-0
+kms_rotation_crc@primary-4-tiled-reflect-x-180
+kms_rotation_crc@multiplane-rotation
+kms_rotation_crc@multiplane-rotation-cropping-top
+kms_rotation_crc@multiplane-rotation-cropping-bottom
+kms_rotation_crc@exhaust-fences
+kms_scaling_modes@scaling-mode-full
+kms_scaling_modes@scaling-mode-center
+kms_scaling_modes@scaling-mode-full-aspect
+kms_scaling_modes@scaling-mode-none
+kms_selftest@drm_cmdline
+kms_selftest@drm_damage
+kms_selftest@drm_dp_mst
+kms_selftest@drm_format_helper
+kms_selftest@drm_format
+kms_selftest@framebuffer
+kms_selftest@drm_plane
+kms_setmode@basic
+kms_setmode@basic-clone-single-crtc
+kms_setmode@invalid-clone-single-crtc
+kms_setmode@invalid-clone-exclusive-crtc
+kms_setmode@clone-exclusive-crtc
+kms_setmode@invalid-clone-single-crtc-stealing
+kms_sysfs_edid_timing
+kms_tv_load_detect@load-detect
+kms_universal_plane@universal-plane-pipe-A-functional
+kms_universal_plane@universal-plane-pipe-A-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-A
+kms_universal_plane@cursor-fb-leak-pipe-A
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-A
+kms_universal_plane@universal-plane-pipe-B-functional
+kms_universal_plane@universal-plane-pipe-B-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-B
+kms_universal_plane@cursor-fb-leak-pipe-B
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-B
+kms_universal_plane@universal-plane-pipe-C-functional
+kms_universal_plane@universal-plane-pipe-C-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-C
+kms_universal_plane@cursor-fb-leak-pipe-C
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-C
+kms_universal_plane@universal-plane-pipe-D-functional
+kms_universal_plane@universal-plane-pipe-D-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-D
+kms_universal_plane@cursor-fb-leak-pipe-D
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-D
+kms_universal_plane@universal-plane-pipe-E-functional
+kms_universal_plane@universal-plane-pipe-E-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-E
+kms_universal_plane@cursor-fb-leak-pipe-E
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-E
+kms_universal_plane@universal-plane-pipe-F-functional
+kms_universal_plane@universal-plane-pipe-F-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-F
+kms_universal_plane@cursor-fb-leak-pipe-F
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-F
+kms_universal_plane@universal-plane-pipe-G-functional
+kms_universal_plane@universal-plane-pipe-G-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-G
+kms_universal_plane@cursor-fb-leak-pipe-G
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-G
+kms_universal_plane@universal-plane-pipe-H-functional
+kms_universal_plane@universal-plane-pipe-H-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-H
+kms_universal_plane@cursor-fb-leak-pipe-H
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-H
+kms_vblank@invalid
+kms_vblank@crtc-id
+kms_vblank@pipe-A-accuracy-idle
+kms_vblank@pipe-A-query-idle
+kms_vblank@pipe-A-query-idle-hang
+kms_vblank@pipe-A-query-forked
+kms_vblank@pipe-A-query-forked-hang
+kms_vblank@pipe-A-query-busy
+kms_vblank@pipe-A-query-busy-hang
+kms_vblank@pipe-A-query-forked-busy
+kms_vblank@pipe-A-query-forked-busy-hang
+kms_vblank@pipe-A-wait-idle
+kms_vblank@pipe-A-wait-idle-hang
+kms_vblank@pipe-A-wait-forked
+kms_vblank@pipe-A-wait-forked-hang
+kms_vblank@pipe-A-wait-busy
+kms_vblank@pipe-A-wait-busy-hang
+kms_vblank@pipe-A-wait-forked-busy
+kms_vblank@pipe-A-wait-forked-busy-hang
+kms_vblank@pipe-A-ts-continuation-idle
+kms_vblank@pipe-A-ts-continuation-idle-hang
+kms_vblank@pipe-A-ts-continuation-dpms-rpm
+kms_vblank@pipe-A-ts-continuation-dpms-suspend
+kms_vblank@pipe-A-ts-continuation-suspend
+kms_vblank@pipe-A-ts-continuation-modeset
+kms_vblank@pipe-A-ts-continuation-modeset-hang
+kms_vblank@pipe-A-ts-continuation-modeset-rpm
+kms_vblank@pipe-B-accuracy-idle
+kms_vblank@pipe-B-query-idle
+kms_vblank@pipe-B-query-idle-hang
+kms_vblank@pipe-B-query-forked
+kms_vblank@pipe-B-query-forked-hang
+kms_vblank@pipe-B-query-busy
+kms_vblank@pipe-B-query-busy-hang
+kms_vblank@pipe-B-query-forked-busy
+kms_vblank@pipe-B-query-forked-busy-hang
+kms_vblank@pipe-B-wait-idle
+kms_vblank@pipe-B-wait-idle-hang
+kms_vblank@pipe-B-wait-forked
+kms_vblank@pipe-B-wait-forked-hang
+kms_vblank@pipe-B-wait-busy
+kms_vblank@pipe-B-wait-busy-hang
+kms_vblank@pipe-B-wait-forked-busy
+kms_vblank@pipe-B-wait-forked-busy-hang
+kms_vblank@pipe-B-ts-continuation-idle
+kms_vblank@pipe-B-ts-continuation-idle-hang
+kms_vblank@pipe-B-ts-continuation-dpms-rpm
+kms_vblank@pipe-B-ts-continuation-dpms-suspend
+kms_vblank@pipe-B-ts-continuation-suspend
+kms_vblank@pipe-B-ts-continuation-modeset
+kms_vblank@pipe-B-ts-continuation-modeset-hang
+kms_vblank@pipe-B-ts-continuation-modeset-rpm
+kms_vblank@pipe-C-accuracy-idle
+kms_vblank@pipe-C-query-idle
+kms_vblank@pipe-C-query-idle-hang
+kms_vblank@pipe-C-query-forked
+kms_vblank@pipe-C-query-forked-hang
+kms_vblank@pipe-C-query-busy
+kms_vblank@pipe-C-query-busy-hang
+kms_vblank@pipe-C-query-forked-busy
+kms_vblank@pipe-C-query-forked-busy-hang
+kms_vblank@pipe-C-wait-idle
+kms_vblank@pipe-C-wait-idle-hang
+kms_vblank@pipe-C-wait-forked
+kms_vblank@pipe-C-wait-forked-hang
+kms_vblank@pipe-C-wait-busy
+kms_vblank@pipe-C-wait-busy-hang
+kms_vblank@pipe-C-wait-forked-busy
+kms_vblank@pipe-C-wait-forked-busy-hang
+kms_vblank@pipe-C-ts-continuation-idle
+kms_vblank@pipe-C-ts-continuation-idle-hang
+kms_vblank@pipe-C-ts-continuation-dpms-rpm
+kms_vblank@pipe-C-ts-continuation-dpms-suspend
+kms_vblank@pipe-C-ts-continuation-suspend
+kms_vblank@pipe-C-ts-continuation-modeset
+kms_vblank@pipe-C-ts-continuation-modeset-hang
+kms_vblank@pipe-C-ts-continuation-modeset-rpm
+kms_vblank@pipe-D-accuracy-idle
+kms_vblank@pipe-D-query-idle
+kms_vblank@pipe-D-query-idle-hang
+kms_vblank@pipe-D-query-forked
+kms_vblank@pipe-D-query-forked-hang
+kms_vblank@pipe-D-query-busy
+kms_vblank@pipe-D-query-busy-hang
+kms_vblank@pipe-D-query-forked-busy
+kms_vblank@pipe-D-query-forked-busy-hang
+kms_vblank@pipe-D-wait-idle
+kms_vblank@pipe-D-wait-idle-hang
+kms_vblank@pipe-D-wait-forked
+kms_vblank@pipe-D-wait-forked-hang
+kms_vblank@pipe-D-wait-busy
+kms_vblank@pipe-D-wait-busy-hang
+kms_vblank@pipe-D-wait-forked-busy
+kms_vblank@pipe-D-wait-forked-busy-hang
+kms_vblank@pipe-D-ts-continuation-idle
+kms_vblank@pipe-D-ts-continuation-idle-hang
+kms_vblank@pipe-D-ts-continuation-dpms-rpm
+kms_vblank@pipe-D-ts-continuation-dpms-suspend
+kms_vblank@pipe-D-ts-continuation-suspend
+kms_vblank@pipe-D-ts-continuation-modeset
+kms_vblank@pipe-D-ts-continuation-modeset-hang
+kms_vblank@pipe-D-ts-continuation-modeset-rpm
+kms_vblank@pipe-E-accuracy-idle
+kms_vblank@pipe-E-query-idle
+kms_vblank@pipe-E-query-idle-hang
+kms_vblank@pipe-E-query-forked
+kms_vblank@pipe-E-query-forked-hang
+kms_vblank@pipe-E-query-busy
+kms_vblank@pipe-E-query-busy-hang
+kms_vblank@pipe-E-query-forked-busy
+kms_vblank@pipe-E-query-forked-busy-hang
+kms_vblank@pipe-E-wait-idle
+kms_vblank@pipe-E-wait-idle-hang
+kms_vblank@pipe-E-wait-forked
+kms_vblank@pipe-E-wait-forked-hang
+kms_vblank@pipe-E-wait-busy
+kms_vblank@pipe-E-wait-busy-hang
+kms_vblank@pipe-E-wait-forked-busy
+kms_vblank@pipe-E-wait-forked-busy-hang
+kms_vblank@pipe-E-ts-continuation-idle
+kms_vblank@pipe-E-ts-continuation-idle-hang
+kms_vblank@pipe-E-ts-continuation-dpms-rpm
+kms_vblank@pipe-E-ts-continuation-dpms-suspend
+kms_vblank@pipe-E-ts-continuation-suspend
+kms_vblank@pipe-E-ts-continuation-modeset
+kms_vblank@pipe-E-ts-continuation-modeset-hang
+kms_vblank@pipe-E-ts-continuation-modeset-rpm
+kms_vblank@pipe-F-accuracy-idle
+kms_vblank@pipe-F-query-idle
+kms_vblank@pipe-F-query-idle-hang
+kms_vblank@pipe-F-query-forked
+kms_vblank@pipe-F-query-forked-hang
+kms_vblank@pipe-F-query-busy
+kms_vblank@pipe-F-query-busy-hang
+kms_vblank@pipe-F-query-forked-busy
+kms_vblank@pipe-F-query-forked-busy-hang
+kms_vblank@pipe-F-wait-idle
+kms_vblank@pipe-F-wait-idle-hang
+kms_vblank@pipe-F-wait-forked
+kms_vblank@pipe-F-wait-forked-hang
+kms_vblank@pipe-F-wait-busy
+kms_vblank@pipe-F-wait-busy-hang
+kms_vblank@pipe-F-wait-forked-busy
+kms_vblank@pipe-F-wait-forked-busy-hang
+kms_vblank@pipe-F-ts-continuation-idle
+kms_vblank@pipe-F-ts-continuation-idle-hang
+kms_vblank@pipe-F-ts-continuation-dpms-rpm
+kms_vblank@pipe-F-ts-continuation-dpms-suspend
+kms_vblank@pipe-F-ts-continuation-suspend
+kms_vblank@pipe-F-ts-continuation-modeset
+kms_vblank@pipe-F-ts-continuation-modeset-hang
+kms_vblank@pipe-F-ts-continuation-modeset-rpm
+kms_vblank@pipe-G-accuracy-idle
+kms_vblank@pipe-G-query-idle
+kms_vblank@pipe-G-query-idle-hang
+kms_vblank@pipe-G-query-forked
+kms_vblank@pipe-G-query-forked-hang
+kms_vblank@pipe-G-query-busy
+kms_vblank@pipe-G-query-busy-hang
+kms_vblank@pipe-G-query-forked-busy
+kms_vblank@pipe-G-query-forked-busy-hang
+kms_vblank@pipe-G-wait-idle
+kms_vblank@pipe-G-wait-idle-hang
+kms_vblank@pipe-G-wait-forked
+kms_vblank@pipe-G-wait-forked-hang
+kms_vblank@pipe-G-wait-busy
+kms_vblank@pipe-G-wait-busy-hang
+kms_vblank@pipe-G-wait-forked-busy
+kms_vblank@pipe-G-wait-forked-busy-hang
+kms_vblank@pipe-G-ts-continuation-idle
+kms_vblank@pipe-G-ts-continuation-idle-hang
+kms_vblank@pipe-G-ts-continuation-dpms-rpm
+kms_vblank@pipe-G-ts-continuation-dpms-suspend
+kms_vblank@pipe-G-ts-continuation-suspend
+kms_vblank@pipe-G-ts-continuation-modeset
+kms_vblank@pipe-G-ts-continuation-modeset-hang
+kms_vblank@pipe-G-ts-continuation-modeset-rpm
+kms_vblank@pipe-H-accuracy-idle
+kms_vblank@pipe-H-query-idle
+kms_vblank@pipe-H-query-idle-hang
+kms_vblank@pipe-H-query-forked
+kms_vblank@pipe-H-query-forked-hang
+kms_vblank@pipe-H-query-busy
+kms_vblank@pipe-H-query-busy-hang
+kms_vblank@pipe-H-query-forked-busy
+kms_vblank@pipe-H-query-forked-busy-hang
+kms_vblank@pipe-H-wait-idle
+kms_vblank@pipe-H-wait-idle-hang
+kms_vblank@pipe-H-wait-forked
+kms_vblank@pipe-H-wait-forked-hang
+kms_vblank@pipe-H-wait-busy
+kms_vblank@pipe-H-wait-busy-hang
+kms_vblank@pipe-H-wait-forked-busy
+kms_vblank@pipe-H-wait-forked-busy-hang
+kms_vblank@pipe-H-ts-continuation-idle
+kms_vblank@pipe-H-ts-continuation-idle-hang
+kms_vblank@pipe-H-ts-continuation-dpms-rpm
+kms_vblank@pipe-H-ts-continuation-dpms-suspend
+kms_vblank@pipe-H-ts-continuation-suspend
+kms_vblank@pipe-H-ts-continuation-modeset
+kms_vblank@pipe-H-ts-continuation-modeset-hang
+kms_vblank@pipe-H-ts-continuation-modeset-rpm
+kms_vrr@flip-basic
+kms_vrr@flip-dpms
+kms_vrr@flip-suspend
+kms_vrr@flipline
+kms_vrr@negative-basic
+kms_writeback@writeback-pixel-formats
+kms_writeback@writeback-invalid-parameters
+kms_writeback@writeback-fb-id
+kms_writeback@writeback-check-output
+prime_mmap_kms@buffer-sharing
diff --git a/drivers/gpu/drm/ci/x86_64.config b/drivers/gpu/drm/ci/x86_64.config
new file mode 100644
index 000000000000..1cbd49a5b23a
--- /dev/null
+++ b/drivers/gpu/drm/ci/x86_64.config
@@ -0,0 +1,111 @@
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_DEBUG_KERNEL=y
+
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM=y
+CONFIG_ZSMALLOC_STAT=y
+
+CONFIG_PWM=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_OF=y
+CONFIG_CROS_EC=y
+
+# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
+CONFIG_BLK_DEV_INITRD=n
+
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+
+CONFIG_DRM=y
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_PWM_CROS_EC=y
+CONFIG_BACKLIGHT_PWM=y
+
+# Strip out some stuff we don't need for graphics testing, to reduce
+# the build.
+CONFIG_CAN=n
+CONFIG_WIRELESS=n
+CONFIG_RFKILL=n
+CONFIG_WLAN=n
+
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR=y
+
+CONFIG_REGULATOR_VCTRL=y
+
+CONFIG_KASAN=n
+CONFIG_KASAN_INLINE=n
+CONFIG_STACKTRACE=n
+
+CONFIG_TMPFS=y
+
+CONFIG_PROVE_LOCKING=n
+CONFIG_DEBUG_LOCKDEP=n
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+
+CONFIG_DETECT_HUNG_TASK=y
+
+CONFIG_USB_USBNET=y
+CONFIG_NETDEVICES=y
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ETH=y
+
+CONFIG_FW_LOADER_COMPRESS=y
+
+# options for AMD devices
+CONFIG_X86_AMD_PLATFORM_DEVICE=y
+CONFIG_ACPI_VIDEO=y
+CONFIG_X86_AMD_FREQ_SENSITIVITY=y
+CONFIG_PINCTRL=y
+CONFIG_PINCTRL_AMD=y
+CONFIG_DRM_AMDGPU=m
+CONFIG_DRM_AMDGPU_SI=y
+CONFIG_DRM_AMDGPU_USERPTR=y
+CONFIG_DRM_AMD_ACP=n
+CONFIG_ACPI_WMI=y
+CONFIG_MXM_WMI=y
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_SERIAL=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_CHROME_PLATFORMS=y
+CONFIG_KVM_AMD=m
+
+#options for Intel devices
+CONFIG_MFD_INTEL_LPSS_PCI=y
+CONFIG_KVM_INTEL=m
+
+#options for KVM guests
+CONFIG_FUSE_FS=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_KVM=y
+CONFIG_KVM_GUEST=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VIRTIO_FS=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_PARAVIRT=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_BLK_MQ_VIRTIO=y
+CONFIG_TUN=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_VHOST_VSOCK=m
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
new file mode 100644
index 000000000000..bd9392536e7c
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -0,0 +1,19 @@
+kms_addfb_basic@bad-pitch-65536,Fail
+kms_addfb_basic@bo-too-small,Fail
+kms_async_flips@invalid-async-flip,Fail
+kms_atomic@plane-immutable-zpos,Fail
+kms_atomic_transition@plane-toggle-modeset-transition,Fail
+kms_bw@linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_color@degamma,Fail
+kms_cursor_crc@cursor-size-change,Fail
+kms_cursor_crc@pipe-A-cursor-size-change,Fail
+kms_cursor_crc@pipe-B-cursor-size-change,Fail
+kms_cursor_legacy@forked-move,Fail
+kms_hdr@bpc-switch,Fail
+kms_hdr@bpc-switch-dpms,Fail
+kms_plane_multiple@atomic-pipe-A-tiling-none,Fail
+kms_rmfb@close-fd,Fail
+kms_rotation_crc@primary-rotation-180,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
new file mode 100644
index 000000000000..f8defa0f9e67
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
@@ -0,0 +1,21 @@
+kms_addfb_basic@too-high
+kms_async_flips@alternate-sync-async-flip
+kms_async_flips@async-flip-with-page-flip-events
+kms_async_flips@crc
+kms_async_flips@test-cursor
+kms_async_flips@test-time-stamp
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
+kms_atomic_transition@plane-all-transition
+kms_atomic_transition@plane-use-after-nonblocking-unbind
+kms_bw@linear-tiling-1-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_cursor_crc@pipe-A-cursor-alpha-opaque
+kms_cursor_crc@pipe-B-cursor-alpha-opaque
+kms_plane@pixel-format
+kms_plane_multiple@atomic-pipe-B-tiling-none
+kms_plane_scaling@downscale-with-rotation-factor-0-5
+kms_universal_plane@disable-primary-vs-flip-pipe-A
+kms_universal_plane@disable-primary-vs-flip-pipe-B
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
new file mode 100644
index 000000000000..e2c538a0f954
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -0,0 +1,2 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
new file mode 100644
index 000000000000..5f513c638beb
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -0,0 +1,17 @@
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
new file mode 100644
index 000000000000..d5000515a315
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
@@ -0,0 +1,32 @@
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-2560x1440p
+kms_bw@linear-tiling-2-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_bw@linear-tiling-4-displays-1920x1080p
+kms_bw@linear-tiling-4-displays-2560x1440p
+kms_bw@linear-tiling-4-displays-3840x2160p
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling
+kms_plane_alpha_blend@pipe-A-alpha-basic
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-A-constant-alpha-max
+kms_plane_alpha_blend@pipe-B-alpha-basic
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-B-constant-alpha-max
+kms_plane_alpha_blend@pipe-C-alpha-basic
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-C-constant-alpha-max
+kms_sysfs_edid_timing
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
new file mode 100644
index 000000000000..fe55540a3f9a
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -0,0 +1,4 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+# This is generating a kernel oops with a divide error
+kms_plane_scaling@invalid-parameters
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
new file mode 100644
index 000000000000..46397ce38d5a
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -0,0 +1,58 @@
+kms_3d,Timeout
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@linear-tiling-4-displays-3840x2160p,Fail
+kms_color@ctm-0-25,Fail
+kms_color@ctm-0-50,Fail
+kms_color@ctm-0-75,Fail
+kms_color@ctm-max,Fail
+kms_color@ctm-negative,Fail
+kms_color@ctm-red-to-blue,Fail
+kms_color@ctm-signed,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling,Fail
+kms_hdmi_inject@inject-4k,Timeout
+kms_plane@plane-position-hole,Timeout
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-A-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-B-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-C-constant-alpha-max,Fail
+kms_plane_multiple@tiling-y,Timeout
+kms_pwrite_crc,Timeout
+kms_sysfs_edid_timing,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
new file mode 100644
index 000000000000..331c5841bb41
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
@@ -0,0 +1 @@
+kms_frontbuffer_tracking@fbc-tiling-linear
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
new file mode 100644
index 000000000000..3430b215c06e
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -0,0 +1,6 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
+# This causes cascading issues
+kms_3d
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
new file mode 100644
index 000000000000..6139b410e767
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -0,0 +1,18 @@
+kms_color@ctm-0-25,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
new file mode 100644
index 000000000000..0514a7b3fdb0
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
@@ -0,0 +1,38 @@
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-2560x1440p
+kms_bw@linear-tiling-2-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_bw@linear-tiling-4-displays-1920x1080p
+kms_bw@linear-tiling-4-displays-2560x1440p
+kms_bw@linear-tiling-4-displays-3840x2160p
+kms_draw_crc@draw-method-xrgb8888-render-xtiled
+kms_flip@flip-vs-suspend
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling
+kms_hdr@bpc-switch-suspend
+kms_plane_alpha_blend@constant-alpha-min
+kms_plane_alpha_blend@pipe-A-alpha-basic
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-A-constant-alpha-max
+kms_plane_alpha_blend@pipe-B-alpha-basic
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-B-constant-alpha-max
+kms_plane_alpha_blend@pipe-C-alpha-basic
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-C-constant-alpha-max
+kms_psr2_su@page_flip-NV12
+kms_psr2_su@page_flip-P010
+kms_setmode@basic
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
new file mode 100644
index 000000000000..6d3d7ddc377f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -0,0 +1,2 @@
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
new file mode 100644
index 000000000000..5bd432e78129
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -0,0 +1,19 @@
+kms_fbcon_fbt@fbc,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
new file mode 100644
index 000000000000..fc41d13a2d56
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
@@ -0,0 +1,41 @@
+kms_bw@linear-tiling-1-displays-3840x2160p
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-2560x1440p
+kms_bw@linear-tiling-2-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_bw@linear-tiling-4-displays-1920x1080p
+kms_bw@linear-tiling-4-displays-2560x1440p
+kms_bw@linear-tiling-4-displays-3840x2160p
+kms_flip@blocking-wf_vblank
+kms_flip@wf_vblank-ts-check
+kms_flip@wf_vblank-ts-check-interruptible
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling
+kms_frontbuffer_tracking@fbc-tiling-linear
+kms_plane_alpha_blend@pipe-A-alpha-basic
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-A-constant-alpha-max
+kms_plane_alpha_blend@pipe-B-alpha-basic
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-B-constant-alpha-max
+kms_plane_alpha_blend@pipe-C-alpha-basic
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-C-constant-alpha-max
+kms_prop_blob@invalid-set-prop-any
+kms_rotation_crc@multiplane-rotation
+kms_rotation_crc@multiplane-rotation-cropping-bottom
+kms_rotation_crc@multiplane-rotation-cropping-top
+kms_setmode@basic
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
new file mode 100644
index 000000000000..4c7d00ce14bc
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -0,0 +1,5 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
new file mode 100644
index 000000000000..56ec021a7679
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -0,0 +1,25 @@
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-4-displays-2560x1440p,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-A-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-C-constant-alpha-max,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
new file mode 100644
index 000000000000..f3ba1c4c5d46
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
@@ -0,0 +1,26 @@
+kms_async_flips@crc
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_bw@linear-tiling-4-displays-1920x1080p
+kms_bw@linear-tiling-4-displays-3840x2160p
+kms_color@ctm-0-25
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
+kms_plane_alpha_blend@pipe-A-alpha-basic
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-B-alpha-basic
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-B-constant-alpha-max
+kms_plane_alpha_blend@pipe-C-alpha-basic
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
+kms_sysfs_edid_timing
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
new file mode 100644
index 000000000000..4c7d00ce14bc
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -0,0 +1,5 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
new file mode 100644
index 000000000000..a6da5544e198
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -0,0 +1,37 @@
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@linear-tiling-4-displays-3840x2160p,Fail
+kms_bw@linear-tiling-5-displays-1920x1080p,Fail
+kms_bw@linear-tiling-5-displays-2560x1440p,Fail
+kms_bw@linear-tiling-5-displays-3840x2160p,Fail
+kms_color@ctm-0-25,Fail
+kms_flip@flip-vs-panning-vs-hang,Timeout
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling,Fail
+kms_rotation_crc@bad-pixel-format,Fail
+kms_rotation_crc@multiplane-rotation,Fail
+kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
+kms_rotation_crc@multiplane-rotation-cropping-top,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-flakes.txt
new file mode 100644
index 000000000000..1cd910ee06df
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-flakes.txt
@@ -0,0 +1,5 @@
+kms_draw_crc@.*
+kms_flip@blocking-absolute-wf_vblank
+kms_flip@bo-too-big-interruptible
+kms_flip@busy-flip
+kms_flip@flip-vs-rmfb-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
new file mode 100644
index 000000000000..1d0621750b14
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -0,0 +1,11 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# GPU hangs, then the whole machine
+gem_eio.*
+
+# Whole machine hangs
+kms_flip@absolute-wf_vblank@a-edp1
+
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
new file mode 100644
index 000000000000..967327ddc1ac
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -0,0 +1,48 @@
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@linear-tiling-4-displays-3840x2160p,Fail
+kms_fbcon_fbt@fbc,Fail
+kms_fbcon_fbt@fbc-suspend,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling,Fail
+kms_frontbuffer_tracking@fbc-tiling-linear,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-A-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-B-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-C-constant-alpha-max,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
new file mode 100644
index 000000000000..c33202e7e2a1
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
@@ -0,0 +1 @@
+kms_flip@flip-vs-suspend
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
new file mode 100644
index 000000000000..f3be0888a214
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -0,0 +1,2 @@
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
new file mode 100644
index 000000000000..671916067dba
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -0,0 +1,29 @@
+kms_3d,Fail
+kms_addfb_basic@addfb25-bad-modifier,Fail
+kms_bw@linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_color@pipe-A-invalid-gamma-lut-sizes,Fail
+kms_color@pipe-B-invalid-gamma-lut-sizes,Fail
+kms_force_connector_basic@force-connector-state,Fail
+kms_force_connector_basic@force-edid,Fail
+kms_force_connector_basic@force-load-detect,Fail
+kms_force_connector_basic@prune-stale-modes,Fail
+kms_invalid_mode@int-max-clock,Fail
+kms_plane_scaling@planes-upscale-20x20,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-75,Fail
+kms_plane_scaling@upscale-with-modifier-20x20,Fail
+kms_plane_scaling@upscale-with-pixel-format-20x20,Fail
+kms_plane_scaling@upscale-with-rotation-20x20,Fail
+kms_properties@get_properties-sanity-atomic,Fail
+kms_properties@plane-properties-atomic,Fail
+kms_properties@plane-properties-legacy,Fail
+kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
new file mode 100644
index 000000000000..6ff81d00e84e
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -0,0 +1,10 @@
+kms_addfb_basic@addfb25-bad-modifier,Fail
+kms_bw@linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_color@pipe-A-invalid-gamma-lut-sizes,Fail
+kms_plane_scaling@upscale-with-rotation-20x20,Fail
+kms_rmfb@close-fd,Fail
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
new file mode 100644
index 000000000000..208890b79eb0
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -0,0 +1,14 @@
+core_setmaster_vs_auth
+kms_bw@linear-tiling-1-displays-1920x1080p
+kms_bw@linear-tiling-1-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_cursor_legacy@cursor-vs-flip-atomic
+kms_plane_scaling@invalid-num-scalers
+kms_plane_scaling@planes-upscale-20x20
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5
+kms_plane_scaling@upscale-with-modifier-20x20
+kms_plane_scaling@upscale-with-pixel-format-20x20
+kms_prop_blob@invalid-set-prop-any
+kms_properties@get_properties-sanity-atomic
+kms_properties@plane-properties-atomic
+kms_properties@plane-properties-legacy
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
new file mode 100644
index 000000000000..860e702091e2
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
@@ -0,0 +1,12 @@
+kms_3d,Fail
+kms_properties@connector-properties-atomic,Fail
+kms_properties@get_properties-sanity-atomic,Fail
+kms_properties@get_properties-sanity-non-atomic,Fail
+kms_properties@connector-properties-legacy,Fail
+kms_cursor_legacy@forked-bo,Fail
+kms_cursor_legacy@forked-move,Fail
+kms_cursor_legacy@single-bo,Fail
+kms_cursor_legacy@single-move,Fail
+kms_cursor_legacy@torture-bo,Fail
+kms_cursor_legacy@torture-move,Fail
+kms_hdmi_inject@inject-4k,Fail
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-flakes.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-flakes.txt
new file mode 100644
index 000000000000..b63329d06767
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-flakes.txt
@@ -0,0 +1,4 @@
+kms_force_connector_basic@force-connector-state
+kms_force_connector_basic@force-edid
+kms_force_connector_basic@force-load-detect
+kms_force_connector_basic@prune-stale-modes
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
new file mode 100644
index 000000000000..9981682feab2
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -0,0 +1,15 @@
+kms_3d,Fail
+kms_addfb_basic@addfb25-bad-modifier,Fail
+kms_cursor_legacy@all-pipes-forked-bo,Fail
+kms_cursor_legacy@all-pipes-forked-move,Fail
+kms_cursor_legacy@all-pipes-single-bo,Fail
+kms_cursor_legacy@all-pipes-single-move,Fail
+kms_cursor_legacy@all-pipes-torture-bo,Fail
+kms_cursor_legacy@all-pipes-torture-move,Fail
+kms_cursor_legacy@pipe-A-forked-bo,Fail
+kms_cursor_legacy@pipe-A-forked-move,Fail
+kms_cursor_legacy@pipe-A-single-bo,Fail
+kms_cursor_legacy@pipe-A-single-move,Fail
+kms_cursor_legacy@pipe-A-torture-bo,Fail
+kms_cursor_legacy@pipe-A-torture-move,Fail
+kms_hdmi_inject@inject-4k,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-flakes.txt
new file mode 100644
index 000000000000..0e3b60d3fade
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-flakes.txt
@@ -0,0 +1,4 @@
+kms_force_connector_basic@force-connector-state
+kms_force_connector_basic@force-edid
+kms_force_connector_basic@force-load-detect
+kms_force_connector_basic@prune-stale-modes
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
new file mode 100644
index 000000000000..88a1fc0a3b0d
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
@@ -0,0 +1,2 @@
+kms_3d,Fail
+kms_addfb_basic@addfb25-bad-modifier,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
new file mode 100644
index 000000000000..0e3b60d3fade
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
@@ -0,0 +1,4 @@
+kms_force_connector_basic@force-connector-state
+kms_force_connector_basic@force-edid
+kms_force_connector_basic@force-load-detect
+kms_force_connector_basic@prune-stale-modes
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
new file mode 100644
index 000000000000..cd49c8ce2059
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -0,0 +1,2 @@
+# Whole machine hangs
+kms_cursor_legacy@all-pipes-torture-move
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt
new file mode 100644
index 000000000000..14adeba3b62d
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt
@@ -0,0 +1,25 @@
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
+kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions,Crash
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_plane@pixel-format,Fail
+kms_plane@pixel-format-source-clamping,Fail
+kms_plane@plane-position-covered,Fail
+kms_plane@plane-position-hole,Fail
+kms_plane@plane-position-hole-dpms,Fail
+kms_plane_alpha_blend@alpha-7efc,Fail
+kms_plane_alpha_blend@coverage-7efc,Fail
+kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
+kms_plane_alpha_blend@pipe-A-alpha-7efc,Fail
+kms_plane_alpha_blend@pipe-A-coverage-7efc,Fail
+kms_plane_alpha_blend@pipe-A-coverage-vs-premult-vs-constant,Fail
+kms_plane_alpha_blend@pipe-B-alpha-7efc,Fail
+kms_plane_alpha_blend@pipe-B-alpha-basic,Fail
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-B-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-B-constant-alpha-mid,Fail
+kms_plane_alpha_blend@pipe-B-coverage-7efc,Fail
+kms_plane_alpha_blend@pipe-B-coverage-vs-premult-vs-constant,Fail
+kms_rmfb@close-fd,Fail
+kms_universal_plane@disable-primary-vs-flip-pipe-b,Fail
+kms_universal_plane@universal-plane-pipe-B-sanity,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt
new file mode 100644
index 000000000000..636563d3e59a
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt
@@ -0,0 +1,7 @@
+
+# Test sometimes ends up reading the CRC from the frame before the
+# cursor update; to be determined whether this is a kernel CRC bug
+# or a test bug
+kms_cursor_crc@.*
+kms_plane_multiple@atomic-pipe-A-tiling-none
+kms_atomic_transition@modeset-transition-nonblocking-fencing,Fail
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt
new file mode 100644
index 000000000000..410e0eeb3161
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt
@@ -0,0 +1,23 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Test incorrectly assumes that CTM support implies gamma/degamma
+# LUT support. None of the subtests handle the case of only having
+# CTM support
+kms_color.*
+
+# 4k@60 is not supported on this hw, but driver doesn't handle it
+# too gracefully.. https://gitlab.freedesktop.org/drm/msm/-/issues/15
+kms_bw@linear-tiling-.*-displays-3840x2160p
+
+# Until igt fix lands: https://patchwork.freedesktop.org/patch/493175/
+kms_bw@linear-tiling-2.*
+kms_bw@linear-tiling-3.*
+kms_bw@linear-tiling-4.*
+kms_bw@linear-tiling-5.*
+kms_bw@linear-tiling-6.*
+
+# igt fix posted: https://patchwork.freedesktop.org/patch/499926/
+# failure mode is flakey due to randomization but fails frequently
+# enough to be detected as a Crash or occasionally UnexpectedPass.
+kms_plane_multiple@atomic-pipe-A-tiling-none
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
new file mode 100644
index 000000000000..09c0c623cd75
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
@@ -0,0 +1,68 @@
+kms_color@ctm-0-25,Fail
+kms_color@ctm-0-50,Fail
+kms_color@ctm-0-75,Fail
+kms_color@ctm-blue-to-red,Fail
+kms_color@ctm-green-to-red,Fail
+kms_color@ctm-negative,Fail
+kms_color@ctm-red-to-blue,Fail
+kms_color@ctm-signed,Fail
+kms_color@pipe-A-ctm-0-25,Fail
+kms_color@pipe-A-ctm-0-5,Fail
+kms_color@pipe-A-ctm-0-75,Fail
+kms_color@pipe-A-ctm-blue-to-red,Fail
+kms_color@pipe-A-ctm-green-to-red,Fail
+kms_color@pipe-A-ctm-max,Fail
+kms_color@pipe-A-ctm-negative,Fail
+kms_color@pipe-A-ctm-red-to-blue,Fail
+kms_color@pipe-A-legacy-gamma,Fail
+kms_cursor_legacy@basic-flip-after-cursor-atomic,Fail
+kms_cursor_legacy@basic-flip-after-cursor-legacy,Fail
+kms_cursor_legacy@basic-flip-after-cursor-varying-size,Fail
+kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
+kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
+kms_cursor_legacy@basic-flip-before-cursor-varying-size,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
+kms_cursor_legacy@cursorA-vs-flipA-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions,Fail
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size,Fail
+kms_cursor_legacy@short-flip-after-cursor-toggle,Fail
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Fail
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_plane@pixel-format,Fail
+kms_plane@pixel-format-source-clamping,Fail
+kms_plane_alpha_blend@alpha-7efc,Fail
+kms_plane_alpha_blend@coverage-7efc,Fail
+kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
+kms_plane_alpha_blend@pipe-A-alpha-7efc,Fail
+kms_plane_alpha_blend@pipe-A-coverage-7efc,Fail
+kms_plane_alpha_blend@pipe-A-coverage-vs-premult-vs-constant,Fail
+kms_plane_cursor@overlay,Fail
+kms_plane_cursor@pipe-A-overlay-size-128,Fail
+kms_plane_cursor@pipe-A-overlay-size-256,Fail
+kms_plane_cursor@pipe-A-overlay-size-64,Fail
+kms_plane_cursor@pipe-A-viewport-size-128,Fail
+kms_plane_cursor@pipe-A-viewport-size-256,Fail
+kms_plane_cursor@pipe-A-viewport-size-64,Fail
+kms_plane_cursor@viewport,Fail
+kms_plane_scaling@downscale-with-pixel-format-factor-0-25,Timeout
+kms_plane_scaling@downscale-with-pixel-format-factor-0-5,Timeout
+kms_plane_scaling@downscale-with-pixel-format-factor-0-75,Timeout
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-25,Timeout
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-5,Timeout
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-75,Timeout
+kms_plane_scaling@plane-scaler-with-clipping-clamping-pixel-formats,Timeout
+kms_plane_scaling@plane-scaler-with-pixel-format-unity-scaling,Timeout
+kms_plane_scaling@planes-downscale-factor-0-25,Fail
+kms_plane_scaling@scaler-with-clipping-clamping,Timeout
+kms_plane_scaling@scaler-with-pixel-format-unity-scaling,Timeout
+kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
new file mode 100644
index 000000000000..5b3aaab7ac3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -0,0 +1,11 @@
+
+
+# Test sometimes ends up reading the CRC from the frame before the
+# cursor update; to be determined whether this is a kernel CRC bug
+# or a test bug
+kms_cursor_crc@.*
+kms_cursor_legacy@flip-vs-cursor-toggle
+kms_cursor_legacy@pipe-A-forked-bo
+kms_cursor_legacy@pipe-A-forked-move
+kms_cursor_legacy@short-flip-before-cursor-toggle
+kms_flip@dpms-vs-vblank-race-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
new file mode 100644
index 000000000000..42675f1c6d76
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -0,0 +1,2 @@
+# Hangs machine
+kms_bw.*
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
new file mode 100644
index 000000000000..2a1baa948e12
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -0,0 +1,48 @@
+kms_3d,Crash
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_force_connector_basic@force-load-detect,Fail
+kms_invalid_mode@int-max-clock,Crash
+kms_plane@pixel-format,Crash
+kms_plane@pixel-format-source-clamping,Crash
+kms_plane@plane-position-hole,Crash
+kms_plane@plane-position-hole-dpms,Crash
+kms_plane_cursor@overlay,Crash
+kms_plane_cursor@pipe-A-overlay-size-128,Fail
+kms_plane_cursor@pipe-A-overlay-size-256,Fail
+kms_plane_cursor@pipe-A-overlay-size-64,Fail
+kms_plane_cursor@pipe-A-primary-size-128,Fail
+kms_plane_cursor@pipe-A-primary-size-256,Fail
+kms_plane_cursor@pipe-A-primary-size-64,Fail
+kms_plane_cursor@pipe-A-viewport-size-128,Fail
+kms_plane_cursor@pipe-A-viewport-size-256,Fail
+kms_plane_cursor@pipe-A-viewport-size-64,Fail
+kms_plane_cursor@pipe-B-overlay-size-128,Fail
+kms_plane_cursor@pipe-B-overlay-size-256,Fail
+kms_plane_cursor@pipe-B-overlay-size-64,Fail
+kms_plane_cursor@pipe-B-primary-size-128,Fail
+kms_plane_cursor@pipe-B-primary-size-256,Fail
+kms_plane_cursor@pipe-B-primary-size-64,Fail
+kms_plane_cursor@pipe-B-viewport-size-128,Fail
+kms_plane_cursor@pipe-B-viewport-size-256,Fail
+kms_plane_cursor@pipe-B-viewport-size-64,Fail
+kms_plane_cursor@primary,Crash
+kms_plane_cursor@viewport,Crash
+kms_plane_lowres@tiling-none,Fail
+kms_plane_scaling@downscale-with-modifier-factor-0-25,Fail
+kms_plane_scaling@downscale-with-rotation-factor-0-25,Fail
+kms_plane_scaling@upscale-with-modifier-20x20,Fail
+kms_plane_scaling@upscale-with-modifier-factor-0-25,Fail
+kms_plane_scaling@upscale-with-pixel-format-20x20,Fail
+kms_plane_scaling@upscale-with-pixel-format-factor-0-25,Fail
+kms_plane_scaling@upscale-with-rotation-20x20,Fail
+kms_prime@basic-crc,Fail
+kms_properties@connector-properties-atomic,Crash
+kms_properties@connector-properties-legacy,Crash
+kms_properties@get_properties-sanity-atomic,Crash
+kms_properties@get_properties-sanity-non-atomic,Crash
+kms_setmode@invalid-clone-single-crtc,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
new file mode 100644
index 000000000000..45c54c75c899
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
@@ -0,0 +1,9 @@
+kms_addfb_basic@addfb25-bad-modifier
+kms_cursor_crc@.*
+kms_flip@basic-flip-vs-wf_vblank
+kms_invalid_mode@int-max-clock,Crash
+kms_pipe_crc_basic@.*
+kms_properties@connector-properties-atomic,Crash
+kms_properties@get_properties-sanity-atomic,Crash
+kms_properties@get_properties-sanity-non-atomic,Crash
+kms_rmfb@close-fd
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
new file mode 100644
index 000000000000..f20c3574b75a
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -0,0 +1,52 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Too unstable, machine ends up hanging after lots of Oopses
+kms_cursor_legacy.*
+
+# Started hanging the machine on Linux 5.19-rc2:
+#
+# [IGT] kms_plane_lowres: executing
+# [IGT] kms_plane_lowres: starting subtest pipe-F-tiling-y
+# [IGT] kms_plane_lowres: exiting, ret=77
+# Console: switching to colour frame buffer device 170x48
+# rockchip-drm display-subsystem: [drm] *ERROR* flip_done timed out
+# rockchip-drm display-subsystem: [drm] *ERROR* [CRTC:35:crtc-0] commit wait timed out
+# BUG: spinlock bad magic on CPU#3, kms_plane_lowre/482
+# 8<--- cut here ---
+# Unable to handle kernel paging request at virtual address 7812078e
+# [7812078e] *pgd=00000000
+# Internal error: Oops: 5 [#1] SMP ARM
+# Modules linked in:
+# CPU: 3 PID: 482 Comm: kms_plane_lowre Tainted: G W 5.19.0-rc2-323596-g00535de92171 #1
+# Hardware name: Rockchip (Device Tree)
+# Process kms_plane_lowre (pid: 482, stack limit = 0x1193ac2b)
+# spin_dump from do_raw_spin_lock+0xa4/0xe8
+# do_raw_spin_lock from wait_for_completion_timeout+0x2c/0x120
+# wait_for_completion_timeout from drm_crtc_commit_wait+0x18/0x7c
+# drm_crtc_commit_wait from drm_atomic_helper_wait_for_dependencies+0x44/0x168
+# drm_atomic_helper_wait_for_dependencies from commit_tail+0x34/0x180
+# commit_tail from drm_atomic_helper_commit+0x164/0x18c
+# drm_atomic_helper_commit from drm_atomic_commit+0xac/0xe4
+# drm_atomic_commit from drm_client_modeset_commit_atomic+0x23c/0x284
+# drm_client_modeset_commit_atomic from drm_client_modeset_commit_locked+0x60/0x1c8
+# drm_client_modeset_commit_locked from drm_client_modeset_commit+0x24/0x40
+# drm_client_modeset_commit from drm_fbdev_client_restore+0x58/0x94
+# drm_fbdev_client_restore from drm_client_dev_restore+0x70/0xbc
+# drm_client_dev_restore from drm_release+0xf4/0x114
+# drm_release from __fput+0x74/0x240
+# __fput from task_work_run+0x84/0xb4
+# task_work_run from do_exit+0x34c/0xa20
+# do_exit from do_group_exit+0x34/0x98
+# do_group_exit from __wake_up_parent+0x0/0x18
+# Code: e595c008 12843d19 03e00000 03093168 (15940508)
+# ---[ end trace 0000000000000000 ]---
+# note: kms_plane_lowre[482] exited with preempt_count 1
+# Fixing recursive fault but reboot is needed!
+kms_plane_lowres@pipe-F-tiling-y
+
+# These take too long; we have only two machines, and they are very flaky
+kms_cursor_crc.*
+
+# Machine is hanging in this test, so skip it
+kms_pipe_crc_basic@disable-crc-after-crtc
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
new file mode 100644
index 000000000000..6db08ba6b008
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -0,0 +1,37 @@
+kms_color@legacy-gamma,Fail
+kms_color@pipe-A-legacy-gamma,Fail
+kms_color@pipe-B-legacy-gamma,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@dpms-vs-vblank-race,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
+kms_flip@flip-vs-wf_vblank-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_flip@wf_vblank-ts-check,Fail
+kms_flip@wf_vblank-ts-check-interruptible,Fail
+kms_invalid_mode@int-max-clock,Fail
+kms_plane@pixel-format,Fail
+kms_plane@pixel-format-source-clamping,Fail
+kms_plane@plane-panning-bottom-right,Fail
+kms_plane@plane-panning-top-left,Fail
+kms_plane@plane-position-covered,Fail
+kms_plane_cursor@pipe-B-overlay-size-128,Fail
+kms_plane_cursor@pipe-B-overlay-size-256,Fail
+kms_plane_cursor@pipe-B-overlay-size-64,Fail
+kms_plane_cursor@pipe-B-primary-size-128,Fail
+kms_plane_cursor@pipe-B-primary-size-256,Fail
+kms_plane_cursor@pipe-B-primary-size-64,Fail
+kms_plane_cursor@pipe-B-viewport-size-128,Fail
+kms_plane_cursor@pipe-B-viewport-size-256,Fail
+kms_plane_cursor@pipe-B-viewport-size-64,Fail
+kms_plane_multiple@atomic-pipe-B-tiling-none,Fail
+kms_plane_multiple@tiling-none,Fail
+kms_prime@basic-crc,Fail
+kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
new file mode 100644
index 000000000000..4c0539b4beaf
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -0,0 +1,23 @@
+
+kms_cursor_crc@.*
+kms_flip@dpms-vs-vblank-race-interruptible
+kms_flip@flip-vs-expired-vblank
+kms_flip@modeset-vs-vblank-race-interruptible
+kms_pipe_crc_basic@.*
+kms_pipe_crc_basic@compare-crc-sanitycheck-pipe-A
+kms_pipe_crc_basic@compare-crc-sanitycheck-pipe-B
+kms_plane@plane-position-hole
+kms_plane_multiple@atomic-pipe-A-tiling-none
+kms_plane_multiple@atomic-pipe-B-tiling-none
+kms_sequence@get-forked
+kms_sequence@get-forked-busy
+kms_setmode@basic
+kms_universal_plane@universal-plane-pipe-B-functional,UnexpectedPass
+kms_vblank@pipe-A-accuracy-idle
+kms_vblank@pipe-A-query-busy
+kms_vblank@pipe-A-query-forked-busy
+kms_vblank@pipe-A-wait-idle
+kms_vblank@pipe-B-accuracy-idle
+kms_vblank@pipe-B-query-busy
+kms_vblank@pipe-B-query-forked-busy
+kms_vblank@pipe-B-wait-idle
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
new file mode 100644
index 000000000000..10c3d81a919a
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -0,0 +1,5 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Too unstable, machine ends up hanging after lots of Oopses
+kms_cursor_legacy.*
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
new file mode 100644
index 000000000000..9586b2339f6f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -0,0 +1,38 @@
+kms_addfb_basic@addfb25-bad-modifier,Fail
+kms_addfb_basic@bad-pitch-65536,Fail
+kms_addfb_basic@bo-too-small,Fail
+kms_addfb_basic@size-max,Fail
+kms_addfb_basic@too-high,Fail
+kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
+kms_bw@linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_invalid_mode@int-max-clock,Fail
+kms_plane_scaling@downscale-with-modifier-factor-0-25,Fail
+kms_plane_scaling@downscale-with-rotation-factor-0-25,Fail
+kms_plane_scaling@planes-upscale-20x20,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-75,Fail
+kms_plane_scaling@upscale-with-modifier-20x20,Fail
+kms_plane_scaling@upscale-with-modifier-factor-0-25,Fail
+kms_plane_scaling@upscale-with-pixel-format-20x20,Fail
+kms_plane_scaling@upscale-with-pixel-format-factor-0-25,Fail
+kms_plane_scaling@upscale-with-rotation-20x20,Fail
+kms_vblank@crtc-id,Fail
+kms_vblank@invalid,Fail
+kms_vblank@pipe-A-accuracy-idle,Fail
+kms_vblank@pipe-A-query-busy,Fail
+kms_vblank@pipe-A-query-forked,Fail
+kms_vblank@pipe-A-query-forked-busy,Fail
+kms_vblank@pipe-A-query-idle,Fail
+kms_vblank@pipe-A-ts-continuation-idle,Fail
+kms_vblank@pipe-A-ts-continuation-modeset,Fail
+kms_vblank@pipe-A-ts-continuation-suspend,Fail
+kms_vblank@pipe-A-wait-busy,Fail
+kms_vblank@pipe-A-wait-forked,Fail
+kms_vblank@pipe-A-wait-forked-busy,Fail
+kms_vblank@pipe-A-wait-idle,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-flakes.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-flakes.txt
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-flakes.txt
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
new file mode 100644
index 000000000000..78be18174012
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -0,0 +1,6 @@
+# Hits a "refcount_t: underflow; use-after-free" in virtio_gpu_fence_event_process
+# when run in a particular order with other tests
+kms_cursor_legacy.*
+
+# Job just hangs without any output
+kms_flip@flip-vs-suspend.*
\ No newline at end of file
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index e99a6fa03d45..a7e677598004 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -58,6 +58,7 @@ struct i915_perf_group;
typedef u32 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)
struct intel_hw_status_page {
struct list_head timelines;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index a0e3ef1c65d2..b5b7f2fe8c78 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -5470,6 +5470,9 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+ BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
+ ve->base.mask = VIRTUAL_ENGINES;
+
intel_context_init(&ve->context, &ve->base);
for (n = 0; n < count; n++) {
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 7c7da284990d..f59081066a19 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -134,9 +134,7 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->semaphore);
/*
- * Keep one request on each engine for reserved use under mempressure
- * do not use with virtual engines as this really is only needed for
- * kernel contexts.
+ * Keep one request on each engine for reserved use under mempressure.
*
* We do not hold a reference to the engine here and so have to be
* very careful in what rq->engine we poke. The virtual engine is
@@ -166,8 +164,7 @@ static void i915_fence_release(struct dma_fence *fence)
* know that if the rq->execution_mask is a single bit, rq->engine
* can be a physical engine with the exact corresponding mask.
*/
- if (!intel_engine_is_virtual(rq->engine) &&
- is_power_of_2(rq->execution_mask) &&
+ if (is_power_of_2(rq->execution_mask) &&
!cmpxchg(&rq->engine->request_pool, NULL, rq))
return;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index a34924523133..a34917b048f9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1122,18 +1122,11 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
PUSH_KICK(push);
- ret = nouveau_fence_new(pfence);
+ ret = nouveau_fence_new(pfence, chan);
if (ret)
goto fail;
- ret = nouveau_fence_emit(*pfence, chan);
- if (ret)
- goto fail_fence_unref;
-
return 0;
-
-fail_fence_unref:
- nouveau_fence_unref(pfence);
fail:
spin_lock_irqsave(&dev->event_lock, flags);
list_del(&s->head);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 19cab37ac69c..0f3bd187ede6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -875,16 +875,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
if (ret)
goto out_unlock;
- ret = nouveau_fence_new(&fence);
+ ret = nouveau_fence_new(&fence, chan);
if (ret)
goto out_unlock;
- ret = nouveau_fence_emit(fence, chan);
- if (ret) {
- nouveau_fence_unref(&fence);
- goto out_unlock;
- }
-
/* TODO: figure out a better solution here
*
* wait on the fence here explicitly as going through
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 1fd5ccf41128..bb3d6e5c122f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -70,11 +70,9 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(&fence);
+ ret = nouveau_fence_new(&fence, chan);
if (!ret) {
- ret = nouveau_fence_emit(fence, chan);
- if (!ret)
- ret = nouveau_fence_wait(fence, false, false);
+ ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 61e84562094a..12feecf71e75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -209,8 +209,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
goto done;
}
- if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, dmem->migrate.chan);
+ nouveau_fence_new(&fence, dmem->migrate.chan);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -403,8 +402,7 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
}
}
- if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
+ nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
@@ -677,8 +675,7 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
addr += PAGE_SIZE;
}
- if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, drm->dmem->migrate.chan);
+ nouveau_fence_new(&fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index a90c4cd8cbb2..19024ce21fbb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -96,7 +96,8 @@ nouveau_exec_job_submit(struct nouveau_job *job)
unsigned long index;
int ret;
- ret = nouveau_fence_new(&exec_job->fence);
+ /* Create a new fence, but do not emit yet. */
+ ret = nouveau_fence_create(&exec_job->fence, exec_job->chan);
if (ret)
return ret;
@@ -170,13 +171,17 @@ nouveau_exec_job_run(struct nouveau_job *job)
nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
}
- ret = nouveau_fence_emit(fence, chan);
+ ret = nouveau_fence_emit(fence);
if (ret) {
+ nouveau_fence_unref(&exec_job->fence);
NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
return ERR_PTR(ret);
}
+ /* The fence was emitted successfully, set the job's fence pointer to
+ * NULL in order to avoid freeing it up when the job is cleaned up.
+ */
exec_job->fence = NULL;
return &fence->base;
@@ -189,7 +194,7 @@ nouveau_exec_job_free(struct nouveau_job *job)
nouveau_job_free(job);
- nouveau_fence_unref(&exec_job->fence);
+ kfree(exec_job->fence);
kfree(exec_job->push.s);
kfree(exec_job);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 77c739a55b19..61d9e70da9fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -205,16 +205,13 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
}
int
-nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
+nouveau_fence_emit(struct nouveau_fence *fence)
{
+ struct nouveau_channel *chan = fence->channel;
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
int ret;
- if (unlikely(!chan->fence))
- return -ENODEV;
-
- fence->channel = chan;
fence->timeout = jiffies + (15 * HZ);
if (priv->uevent)
@@ -406,18 +403,41 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
}
int
-nouveau_fence_new(struct nouveau_fence **pfence)
+nouveau_fence_create(struct nouveau_fence **pfence,
+ struct nouveau_channel *chan)
{
struct nouveau_fence *fence;
+ if (unlikely(!chan->fence))
+ return -ENODEV;
+
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
+ fence->channel = chan;
+
*pfence = fence;
return 0;
}
+int
+nouveau_fence_new(struct nouveau_fence **pfence,
+ struct nouveau_channel *chan)
+{
+ int ret = 0;
+
+ ret = nouveau_fence_create(pfence, chan);
+ if (ret)
+ return ret;
+
+ ret = nouveau_fence_emit(*pfence);
+ if (ret)
+ nouveau_fence_unref(pfence);
+
+ return ret;
+}
+
static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
return "nouveau";
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2c72d96ef17d..64d33ae7f356 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -17,10 +17,11 @@ struct nouveau_fence {
unsigned long timeout;
};
-int nouveau_fence_new(struct nouveau_fence **);
+int nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *);
+int nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *);
void nouveau_fence_unref(struct nouveau_fence **);
-int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
+int nouveau_fence_emit(struct nouveau_fence *);
bool nouveau_fence_done(struct nouveau_fence *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c0b10d8d3d03..a0d303e5ce3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -914,11 +914,8 @@ revalidate:
}
}
- ret = nouveau_fence_new(&fence);
- if (!ret)
- ret = nouveau_fence_emit(fence, chan);
+ ret = nouveau_fence_new(&fence, chan);
if (ret) {
- nouveau_fence_unref(&fence);
NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
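
The nouveau hunks above split fence allocation from emission: nouveau_fence_create() binds the fence to a channel at allocation time, nouveau_fence_emit() no longer takes the channel, and nouveau_fence_new() becomes a convenience wrapper that does both and drops the fence on failure. A minimal sketch of the calling convention this implies, using the signatures declared in nouveau_fence.h above; the surrounding submission context is hypothetical and the snippet is not buildable outside the driver:

static int example_submit(struct nouveau_channel *chan)
{
	struct nouveau_fence *fence;
	int ret;

	/* Allocate the fence and bind it to the channel, but do not emit yet. */
	ret = nouveau_fence_create(&fence, chan);
	if (ret)
		return ret;

	/* ... push work to the channel here ... */

	/* Emit once the pushbuf is queued; drop the fence if emission fails. */
	ret = nouveau_fence_emit(fence);
	if (ret)
		nouveau_fence_unref(&fence);

	return ret;
}
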
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index 9b00b56230b6..cf8e5f1bd101 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -533,7 +533,7 @@ struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(ascot2e_attach);
+EXPORT_SYMBOL_GPL(ascot2e_attach);
MODULE_DESCRIPTION("Sony ASCOT2E terr/cab tuner driver");
MODULE_AUTHOR("info@netup.ru");
diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
index bdd16b9c5824..778c865085bf 100644
--- a/drivers/media/dvb-frontends/atbm8830.c
+++ b/drivers/media/dvb-frontends/atbm8830.c
@@ -489,7 +489,7 @@ error_out:
return NULL;
}
-EXPORT_SYMBOL(atbm8830_attach);
+EXPORT_SYMBOL_GPL(atbm8830_attach);
MODULE_DESCRIPTION("AltoBeam ATBM8830/8831 GB20600 demodulator driver");
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index 78cafdf27961..230436bf6cbd 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -879,7 +879,7 @@ error:
au8522_release_state(state);
return NULL;
}
-EXPORT_SYMBOL(au8522_attach);
+EXPORT_SYMBOL_GPL(au8522_attach);
static const struct dvb_frontend_ops au8522_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index 68b92b4419cf..b3f5c49accaf 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -835,7 +835,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(bcm3510_attach);
+EXPORT_SYMBOL_GPL(bcm3510_attach);
static const struct dvb_frontend_ops bcm3510_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index b39ff516271b..1d04c0a652b2 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -432,4 +432,4 @@ MODULE_DESCRIPTION("Conexant CX22700 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(cx22700_attach);
+EXPORT_SYMBOL_GPL(cx22700_attach);
diff --git a/drivers/media/dvb-frontends/cx22702.c b/drivers/media/dvb-frontends/cx22702.c
index cc6acbf6393d..61ad34b7004b 100644
--- a/drivers/media/dvb-frontends/cx22702.c
+++ b/drivers/media/dvb-frontends/cx22702.c
@@ -604,7 +604,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(cx22702_attach);
+EXPORT_SYMBOL_GPL(cx22702_attach);
static const struct dvb_frontend_ops cx22702_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 6f99d6a27be2..9aeea089756f 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -653,4 +653,4 @@ MODULE_DESCRIPTION("Conexant CX24110 DVB-S Demodulator driver");
MODULE_AUTHOR("Peter Hettkamp");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(cx24110_attach);
+EXPORT_SYMBOL_GPL(cx24110_attach);
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index dd55d314bf9a..203cb6b3f941 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -590,7 +590,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(cx24113_attach);
+EXPORT_SYMBOL_GPL(cx24113_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index ea8264ccbb4e..8b978a9f74a4 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -1133,7 +1133,7 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
state->frontend.demodulator_priv = state;
return &state->frontend;
}
-EXPORT_SYMBOL(cx24116_attach);
+EXPORT_SYMBOL_GPL(cx24116_attach);
/*
* Initialise or wake up device
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 0f778660c72b..44515fdbe91d 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -305,7 +305,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(cx24120_attach);
+EXPORT_SYMBOL_GPL(cx24120_attach);
static int cx24120_test_rom(struct cx24120_state *state)
{
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 3d84ee17e54c..539889e638cc 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1096,7 +1096,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(cx24123_attach);
+EXPORT_SYMBOL_GPL(cx24123_attach);
static const struct dvb_frontend_ops cx24123_ops = {
.delsys = { SYS_DVBS },
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index d7ee294c6833..7feb08dccfa1 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -536,7 +536,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *config,
return pdata.get_dvb_frontend(client);
}
-EXPORT_SYMBOL(cxd2820r_attach);
+EXPORT_SYMBOL_GPL(cxd2820r_attach);
static struct dvb_frontend *cxd2820r_get_dvb_frontend(struct i2c_client *client)
{
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index ef403a9fb753..d925ca24183b 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3930,14 +3930,14 @@ struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
{
return cxd2841er_attach(cfg, i2c, SYS_DVBS);
}
-EXPORT_SYMBOL(cxd2841er_attach_s);
+EXPORT_SYMBOL_GPL(cxd2841er_attach_s);
struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg,
struct i2c_adapter *i2c)
{
return cxd2841er_attach(cfg, i2c, 0);
}
-EXPORT_SYMBOL(cxd2841er_attach_t_c);
+EXPORT_SYMBOL_GPL(cxd2841er_attach_t_c);
static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
diff --git a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
index f67b6d24b8d4..a06d8368ca79 100644
--- a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
+++ b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
@@ -1950,7 +1950,7 @@ struct dvb_frontend *cxd2880_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(cxd2880_attach);
+EXPORT_SYMBOL_GPL(cxd2880_attach);
MODULE_DESCRIPTION("Sony CXD2880 DVB-T2/T tuner + demod driver");
MODULE_AUTHOR("Sony Semiconductor Solutions Corporation");
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index cafb41dba861..9a8e7cdd2a24 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -762,7 +762,7 @@ free_mem:
fe->tuner_priv = NULL;
return NULL;
}
-EXPORT_SYMBOL(dib0070_attach);
+EXPORT_SYMBOL_GPL(dib0070_attach);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 0070 base-band RF Tuner");
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 903da33642df..c958bcff026e 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -2634,7 +2634,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
return NULL;
}
-EXPORT_SYMBOL(dib0090_register);
+EXPORT_SYMBOL_GPL(dib0090_register);
struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
{
@@ -2660,7 +2660,7 @@ free_mem:
fe->tuner_priv = NULL;
return NULL;
}
-EXPORT_SYMBOL(dib0090_fw_register);
+EXPORT_SYMBOL_GPL(dib0090_fw_register);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index a6c2fc4586eb..c598b2a63325 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -815,4 +815,4 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(dib3000mb_attach);
+EXPORT_SYMBOL_GPL(dib3000mb_attach);
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 2e11a246aae0..c2fca8289aba 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -935,7 +935,7 @@ error:
kfree(st);
return NULL;
}
-EXPORT_SYMBOL(dib3000mc_attach);
+EXPORT_SYMBOL_GPL(dib3000mc_attach);
static const struct dvb_frontend_ops dib3000mc_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index 97ce97789c9e..fdb22f32e3a1 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -1434,7 +1434,7 @@ error:
kfree(st);
return NULL;
}
-EXPORT_SYMBOL(dib7000m_attach);
+EXPORT_SYMBOL_GPL(dib7000m_attach);
static const struct dvb_frontend_ops dib7000m_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 9273758bf140..444fe1c4bf2d 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2822,7 +2822,7 @@ void *dib7000p_attach(struct dib7000p_ops *ops)
return ops;
}
-EXPORT_SYMBOL(dib7000p_attach);
+EXPORT_SYMBOL_GPL(dib7000p_attach);
static const struct dvb_frontend_ops dib7000p_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 2abda7d1cb6e..2f5165918163 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -4527,7 +4527,7 @@ void *dib8000_attach(struct dib8000_ops *ops)
return ops;
}
-EXPORT_SYMBOL(dib8000_attach);
+EXPORT_SYMBOL_GPL(dib8000_attach);
MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@parrot.com, Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator");
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 1c57587a917a..83cf6eadd49c 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -2546,7 +2546,7 @@ error:
kfree(st);
return NULL;
}
-EXPORT_SYMBOL(dib9000_attach);
+EXPORT_SYMBOL_GPL(dib9000_attach);
static const struct dvb_frontend_ops dib9000_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 68f4e8b5a0ab..a738573c8cd7 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -12372,7 +12372,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(drx39xxj_attach);
+EXPORT_SYMBOL_GPL(drx39xxj_attach);
static const struct dvb_frontend_ops drx39xxj_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 9860cae65f1c..6a531937f4bb 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2939,7 +2939,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(drxd_attach);
+EXPORT_SYMBOL_GPL(drxd_attach);
MODULE_DESCRIPTION("DRXD driver");
MODULE_AUTHOR("Micronas");
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 2770baebbbbc..87f3d4f0eb8c 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6814,7 +6814,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(drxk_attach);
+EXPORT_SYMBOL_GPL(drxk_attach);
MODULE_DESCRIPTION("DRX-K driver");
MODULE_AUTHOR("Ralph Metzler");
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 20fcf31af165..515aa7c7baf2 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -859,7 +859,7 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
return &state->frontend;
}
-EXPORT_SYMBOL(ds3000_attach);
+EXPORT_SYMBOL_GPL(ds3000_attach);
static int ds3000_set_carrier_offset(struct dvb_frontend *fe,
s32 carrier_offset_khz)
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 90cb41eacf98..ef697ab6bc2e 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -866,7 +866,7 @@ out:
return NULL;
}
-EXPORT_SYMBOL(dvb_pll_attach);
+EXPORT_SYMBOL_GPL(dvb_pll_attach);
static int
diff --git a/drivers/media/dvb-frontends/ec100.c b/drivers/media/dvb-frontends/ec100.c
index 03bd80666cf8..2ad0a3c2f756 100644
--- a/drivers/media/dvb-frontends/ec100.c
+++ b/drivers/media/dvb-frontends/ec100.c
@@ -299,7 +299,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(ec100_attach);
+EXPORT_SYMBOL_GPL(ec100_attach);
static const struct dvb_frontend_ops ec100_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index 68c1a3e0e2ba..f127adee3ebb 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -1025,7 +1025,7 @@ struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(helene_attach_s);
+EXPORT_SYMBOL_GPL(helene_attach_s);
struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
const struct helene_config *config,
@@ -1061,7 +1061,7 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(helene_attach);
+EXPORT_SYMBOL_GPL(helene_attach);
static int helene_probe(struct i2c_client *client)
{
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index 24bf5cbcc184..0330b78a5b3f 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -395,7 +395,7 @@ struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(horus3a_attach);
+EXPORT_SYMBOL_GPL(horus3a_attach);
MODULE_DESCRIPTION("Sony HORUS3A satellite tuner driver");
MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>");
diff --git a/drivers/media/dvb-frontends/isl6405.c b/drivers/media/dvb-frontends/isl6405.c
index 2cd69b4ff82c..7d28a743f97e 100644
--- a/drivers/media/dvb-frontends/isl6405.c
+++ b/drivers/media/dvb-frontends/isl6405.c
@@ -141,7 +141,7 @@ struct dvb_frontend *isl6405_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(isl6405_attach);
+EXPORT_SYMBOL_GPL(isl6405_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6405");
MODULE_AUTHOR("Hartmut Hackmann & Oliver Endriss");
diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c
index 43b0dfc6f453..2e9f6f12f849 100644
--- a/drivers/media/dvb-frontends/isl6421.c
+++ b/drivers/media/dvb-frontends/isl6421.c
@@ -213,7 +213,7 @@ struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(isl6421_attach);
+EXPORT_SYMBOL_GPL(isl6421_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6421");
MODULE_AUTHOR("Andrew de Quincey & Oliver Endriss");
diff --git a/drivers/media/dvb-frontends/isl6423.c b/drivers/media/dvb-frontends/isl6423.c
index 8cd1bb88ce6e..a0d0a3834057 100644
--- a/drivers/media/dvb-frontends/isl6423.c
+++ b/drivers/media/dvb-frontends/isl6423.c
@@ -289,7 +289,7 @@ exit:
fe->sec_priv = NULL;
return NULL;
}
-EXPORT_SYMBOL(isl6423_attach);
+EXPORT_SYMBOL_GPL(isl6423_attach);
MODULE_DESCRIPTION("ISL6423 SEC");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index 1b33478653d1..f8f362f50e78 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -389,7 +389,7 @@ struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(itd1000_attach);
+EXPORT_SYMBOL_GPL(itd1000_attach);
MODULE_AUTHOR("Patrick Boettcher <pb@linuxtv.org>");
MODULE_DESCRIPTION("Integrant ITD1000 driver");
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 73f27105c139..3212e333d472 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -302,7 +302,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(ix2505v_attach);
+EXPORT_SYMBOL_GPL(ix2505v_attach);
module_param_named(debug, ix2505v_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index c5106a1ea1cd..fe5af2453d55 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -593,4 +593,4 @@ MODULE_DESCRIPTION("LSI L64781 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler, Marko Kohtala");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(l64781_attach);
+EXPORT_SYMBOL_GPL(l64781_attach);
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index f343066c297e..fe700aa56bff 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1426,7 +1426,7 @@ struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
return &state->frontend;
}
-EXPORT_SYMBOL(lg2160_attach);
+EXPORT_SYMBOL_GPL(lg2160_attach);
MODULE_DESCRIPTION("LG Electronics LG216x ATSC/MH Demodulator Driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index c15d3735d34c..bdc8311e1c0b 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1148,7 +1148,7 @@ fail:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(lgdt3305_attach);
+EXPORT_SYMBOL_GPL(lgdt3305_attach);
static const struct dvb_frontend_ops lgdt3304_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 3c6650f6e9a3..263887592415 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -1859,7 +1859,7 @@ fail:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(lgdt3306a_attach);
+EXPORT_SYMBOL_GPL(lgdt3306a_attach);
#ifdef DBG_DUMP
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 97a10996c7fa..081d6ad3ce72 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -927,7 +927,7 @@ struct dvb_frontend *lgdt330x_attach(const struct lgdt330x_config *_config,
return lgdt330x_get_dvb_frontend(client);
}
-EXPORT_SYMBOL(lgdt330x_attach);
+EXPORT_SYMBOL_GPL(lgdt330x_attach);
static const struct dvb_frontend_ops lgdt3302_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
index 30014979b985..ffaf60e16ecd 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.c
+++ b/drivers/media/dvb-frontends/lgs8gxx.c
@@ -1043,7 +1043,7 @@ error_out:
return NULL;
}
-EXPORT_SYMBOL(lgs8gxx_attach);
+EXPORT_SYMBOL_GPL(lgs8gxx_attach);
MODULE_DESCRIPTION("Legend Silicon LGS8913/LGS8GXX DMB-TH demodulator driver");
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c
index 9ffe06cd787d..41bec050642b 100644
--- a/drivers/media/dvb-frontends/lnbh25.c
+++ b/drivers/media/dvb-frontends/lnbh25.c
@@ -173,7 +173,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe,
__func__, priv->i2c_address);
return fe;
}
-EXPORT_SYMBOL(lnbh25_attach);
+EXPORT_SYMBOL_GPL(lnbh25_attach);
MODULE_DESCRIPTION("ST LNBH25 driver");
MODULE_AUTHOR("info@netup.ru");
diff --git a/drivers/media/dvb-frontends/lnbp21.c b/drivers/media/dvb-frontends/lnbp21.c
index e564974162d6..32593b1f75a3 100644
--- a/drivers/media/dvb-frontends/lnbp21.c
+++ b/drivers/media/dvb-frontends/lnbp21.c
@@ -155,7 +155,7 @@ struct dvb_frontend *lnbh24_attach(struct dvb_frontend *fe,
return lnbx2x_attach(fe, i2c, override_set, override_clear,
i2c_addr, LNBH24_TTX);
}
-EXPORT_SYMBOL(lnbh24_attach);
+EXPORT_SYMBOL_GPL(lnbh24_attach);
struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, u8 override_set,
@@ -164,7 +164,7 @@ struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
return lnbx2x_attach(fe, i2c, override_set, override_clear,
0x08, LNBP21_ISEL);
}
-EXPORT_SYMBOL(lnbp21_attach);
+EXPORT_SYMBOL_GPL(lnbp21_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp21, lnbh24");
MODULE_AUTHOR("Oliver Endriss, Igor M. Liplianin");
diff --git a/drivers/media/dvb-frontends/lnbp22.c b/drivers/media/dvb-frontends/lnbp22.c
index b8c7145d4cef..cb4ea5d3fad4 100644
--- a/drivers/media/dvb-frontends/lnbp22.c
+++ b/drivers/media/dvb-frontends/lnbp22.c
@@ -125,7 +125,7 @@ struct dvb_frontend *lnbp22_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(lnbp22_attach);
+EXPORT_SYMBOL_GPL(lnbp22_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp22");
MODULE_AUTHOR("Dominik Kuhlen");
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index cf49ac56a37e..cf037b61b226 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1695,7 +1695,7 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
*tuner_i2c_adapter = pdata.get_i2c_adapter(client);
return pdata.get_dvb_frontend(client);
}
-EXPORT_SYMBOL(m88ds3103_attach);
+EXPORT_SYMBOL_GPL(m88ds3103_attach);
static const struct dvb_frontend_ops m88ds3103_ops = {
.delsys = {SYS_DVBS, SYS_DVBS2},
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index b294ba87e934..2aa98203cd65 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -808,7 +808,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(m88rs2000_attach);
+EXPORT_SYMBOL_GPL(m88rs2000_attach);
MODULE_DESCRIPTION("M88RS2000 DVB-S Demodulator driver");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 3ec2cb4fa504..0fc45896e7b8 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -1853,6 +1853,6 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(mb86a16_attach);
+EXPORT_SYMBOL_GPL(mb86a16_attach);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 125fed4891ba..f8e4bbee5bd5 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2078,7 +2078,7 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
dev_info(&i2c->dev, "Detected a Fujitsu mb86a20s frontend\n");
return &state->frontend;
}
-EXPORT_SYMBOL(mb86a20s_attach);
+EXPORT_SYMBOL_GPL(mb86a20s_attach);
static const struct dvb_frontend_ops mb86a20s_ops = {
.delsys = { SYS_ISDBT },
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index d43a67045dbe..fb867dd8a26b 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -827,7 +827,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(mt312_attach);
+EXPORT_SYMBOL_GPL(mt312_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index 399d5c519027..1b2889f5cf67 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -593,4 +593,4 @@ MODULE_DESCRIPTION("Zarlink MT352 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler, Daniel Mack, Antonio Mancuso");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(mt352_attach);
+EXPORT_SYMBOL_GPL(mt352_attach);
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 200b6dbc75f8..1c549ada6ebf 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -1216,5 +1216,5 @@ MODULE_DESCRIPTION("NXT200X (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulat
MODULE_AUTHOR("Kirk Lapray, Michael Krufky, Jean-Francois Thibert, and Taylor Jacob");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(nxt200x_attach);
+EXPORT_SYMBOL_GPL(nxt200x_attach);
diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c
index 136918f82dda..e8d4940370dd 100644
--- a/drivers/media/dvb-frontends/nxt6000.c
+++ b/drivers/media/dvb-frontends/nxt6000.c
@@ -621,4 +621,4 @@ MODULE_DESCRIPTION("NxtWave NXT6000 DVB-T demodulator driver");
MODULE_AUTHOR("Florian Schirmer");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(nxt6000_attach);
+EXPORT_SYMBOL_GPL(nxt6000_attach);
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index 355f3598627b..74e04c7cca1e 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -605,4 +605,4 @@ MODULE_AUTHOR("Kirk Lapray");
MODULE_AUTHOR("Trent Piepho");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(or51132_attach);
+EXPORT_SYMBOL_GPL(or51132_attach);
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index ae732dc5116e..2e8e7071a67a 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -551,5 +551,5 @@ MODULE_DESCRIPTION("Oren OR51211 VSB [pcHDTV HD-2000] Demodulator Driver");
MODULE_AUTHOR("Kirk Lapray");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(or51211_attach);
+EXPORT_SYMBOL_GPL(or51211_attach);
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index 3089cc174a6f..28b1dca077ea 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -981,7 +981,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(s5h1409_attach);
+EXPORT_SYMBOL_GPL(s5h1409_attach);
static const struct dvb_frontend_ops s5h1409_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index 2563a72e98b7..fc48e659c2d8 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -900,7 +900,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(s5h1411_attach);
+EXPORT_SYMBOL_GPL(s5h1411_attach);
static const struct dvb_frontend_ops s5h1411_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index 6bdec2898bc8..d700de1ea6c2 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -918,7 +918,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(s5h1420_attach);
+EXPORT_SYMBOL_GPL(s5h1420_attach);
static const struct dvb_frontend_ops s5h1420_ops = {
.delsys = { SYS_DVBS },
diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
index 956e8ee4b388..ff5d3bdf3bc6 100644
--- a/drivers/media/dvb-frontends/s5h1432.c
+++ b/drivers/media/dvb-frontends/s5h1432.c
@@ -355,7 +355,7 @@ struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
return &state->frontend;
}
-EXPORT_SYMBOL(s5h1432_attach);
+EXPORT_SYMBOL_GPL(s5h1432_attach);
static const struct dvb_frontend_ops s5h1432_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index f118d8e64103..7e461ac159fc 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -495,7 +495,7 @@ struct dvb_frontend *s921_attach(const struct s921_config *config,
return &state->frontend;
}
-EXPORT_SYMBOL(s921_attach);
+EXPORT_SYMBOL_GPL(s921_attach);
static const struct dvb_frontend_ops s921_ops = {
.delsys = { SYS_ISDBT },
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index 2d29d2c4d434..210ccd356e2b 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -937,7 +937,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(si21xx_attach);
+EXPORT_SYMBOL_GPL(si21xx_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index 146e7f2dd3c5..f59c0f96416b 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -624,4 +624,4 @@ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Spase sp887x DVB-T demodulator driver");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(sp887x_attach);
+EXPORT_SYMBOL_GPL(sp887x_attach);
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 4ee6c1e1e9f7..2f4d8fb400cd 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -1638,7 +1638,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stb0899_attach);
+EXPORT_SYMBOL_GPL(stb0899_attach);
MODULE_PARM_DESC(verbose, "Set Verbosity level");
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("STB0899 Multi-Std frontend");
diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
index 8c9800d577e0..d74e34677b92 100644
--- a/drivers/media/dvb-frontends/stb6000.c
+++ b/drivers/media/dvb-frontends/stb6000.c
@@ -232,7 +232,7 @@ struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
return fe;
}
-EXPORT_SYMBOL(stb6000_attach);
+EXPORT_SYMBOL_GPL(stb6000_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index 698866c4f15a..c5818a15a0d7 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -557,7 +557,7 @@ static void stb6100_release(struct dvb_frontend *fe)
kfree(state);
}
-EXPORT_SYMBOL(stb6100_attach);
+EXPORT_SYMBOL_GPL(stb6100_attach);
MODULE_PARM_DESC(verbose, "Set Verbosity level");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index 3ae1f3a2f142..a5581bd60f9e 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -590,7 +590,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(stv0288_attach);
+EXPORT_SYMBOL_GPL(stv0288_attach);
module_param(debug_legacy_dish_switch, int, 0444);
MODULE_PARM_DESC(debug_legacy_dish_switch,
diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
index 6d5962d5697a..9d4dbd99a5a7 100644
--- a/drivers/media/dvb-frontends/stv0297.c
+++ b/drivers/media/dvb-frontends/stv0297.c
@@ -710,4 +710,4 @@ MODULE_DESCRIPTION("ST STV0297 DVB-C Demodulator driver");
MODULE_AUTHOR("Dennis Noermann and Andrew de Quincey");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(stv0297_attach);
+EXPORT_SYMBOL_GPL(stv0297_attach);
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index b5263a0ee5aa..da7ff2c2e8e5 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -752,4 +752,4 @@ MODULE_DESCRIPTION("ST STV0299 DVB Demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(stv0299_attach);
+EXPORT_SYMBOL_GPL(stv0299_attach);
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index a93f40617469..48326434488c 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -1750,7 +1750,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0367ter_attach);
+EXPORT_SYMBOL_GPL(stv0367ter_attach);
static int stv0367cab_gate_ctrl(struct dvb_frontend *fe, int enable)
{
@@ -2919,7 +2919,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0367cab_attach);
+EXPORT_SYMBOL_GPL(stv0367cab_attach);
/*
* Functions for operation on Digital Devices hardware
@@ -3340,7 +3340,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0367ddb_attach);
+EXPORT_SYMBOL_GPL(stv0367ddb_attach);
MODULE_PARM_DESC(debug, "Set debug");
MODULE_PARM_DESC(i2c_debug, "Set i2c debug");
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 212312d20ff6..e7b9b9b11d7d 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1957,7 +1957,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0900_attach);
+EXPORT_SYMBOL_GPL(stv0900_attach);
MODULE_PARM_DESC(debug, "Set debug");
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index a07dc5fdeb3d..cc45139057ba 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -5071,7 +5071,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv090x_attach);
+EXPORT_SYMBOL_GPL(stv090x_attach);
static const struct i2c_device_id stv090x_id_table[] = {
{"stv090x", 0},
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 963f6a896102..1cf9c095dbff 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -427,7 +427,7 @@ struct dvb_frontend *stv6110_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(stv6110_attach);
+EXPORT_SYMBOL_GPL(stv6110_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index 11653f846c12..c678f47d2449 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -467,7 +467,7 @@ const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
dev_info(&stv6110x->i2c->dev, "Attaching STV6110x\n");
return stv6110x->devctl;
}
-EXPORT_SYMBOL(stv6110x_attach);
+EXPORT_SYMBOL_GPL(stv6110x_attach);
static const struct i2c_device_id stv6110x_id_table[] = {
{"stv6110x", 0},
diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
index faa6e54b3372..462e12ab6bd1 100644
--- a/drivers/media/dvb-frontends/tda10021.c
+++ b/drivers/media/dvb-frontends/tda10021.c
@@ -523,4 +523,4 @@ MODULE_DESCRIPTION("Philips TDA10021 DVB-C demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Markus Schulz");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10021_attach);
+EXPORT_SYMBOL_GPL(tda10021_attach);
diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
index 8f32edf6b700..4c2541ecd743 100644
--- a/drivers/media/dvb-frontends/tda10023.c
+++ b/drivers/media/dvb-frontends/tda10023.c
@@ -594,4 +594,4 @@ MODULE_DESCRIPTION("Philips TDA10023 DVB-C demodulator driver");
MODULE_AUTHOR("Georg Acher, Hartmut Birr");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10023_attach);
+EXPORT_SYMBOL_GPL(tda10023_attach);
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index 3cb4e5270e4f..5d5e4e9e4422 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -1138,7 +1138,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(tda10048_attach);
+EXPORT_SYMBOL_GPL(tda10048_attach);
static const struct dvb_frontend_ops tda10048_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index 83a798ca9b00..6f306db6c615 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -1378,5 +1378,5 @@ MODULE_DESCRIPTION("Philips TDA10045H & TDA10046H DVB-T Demodulator");
MODULE_AUTHOR("Andrew de Quincey & Robert Schlabbach");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10045_attach);
-EXPORT_SYMBOL(tda10046_attach);
+EXPORT_SYMBOL_GPL(tda10045_attach);
+EXPORT_SYMBOL_GPL(tda10046_attach);
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index cdcf97664bba..b449514ae585 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -764,4 +764,4 @@ MODULE_DESCRIPTION("Philips TDA10086 DVB-S Demodulator");
MODULE_AUTHOR("Andrew de Quincey");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10086_attach);
+EXPORT_SYMBOL_GPL(tda10086_attach);
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 13e8969da7f8..346be5011fb7 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -227,7 +227,7 @@ struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(tda665x_attach);
+EXPORT_SYMBOL_GPL(tda665x_attach);
MODULE_DESCRIPTION("TDA665x driver");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index e3e1c3db2c85..44f53624557b 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -481,4 +481,4 @@ MODULE_DESCRIPTION("Philips TDA8083 DVB-S Demodulator");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda8083_attach);
+EXPORT_SYMBOL_GPL(tda8083_attach);
diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
index 0d576d41c67d..8b06f92745dc 100644
--- a/drivers/media/dvb-frontends/tda8261.c
+++ b/drivers/media/dvb-frontends/tda8261.c
@@ -188,7 +188,7 @@ exit:
return NULL;
}
-EXPORT_SYMBOL(tda8261_attach);
+EXPORT_SYMBOL_GPL(tda8261_attach);
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
index f9703a1dd758..eafcf5f7da3d 100644
--- a/drivers/media/dvb-frontends/tda826x.c
+++ b/drivers/media/dvb-frontends/tda826x.c
@@ -164,7 +164,7 @@ struct dvb_frontend *tda826x_attach(struct dvb_frontend *fe, int addr, struct i2
return fe;
}
-EXPORT_SYMBOL(tda826x_attach);
+EXPORT_SYMBOL_GPL(tda826x_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index f5b60f827697..a5ebce57f35e 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -525,7 +525,7 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(ts2020_attach);
+EXPORT_SYMBOL_GPL(ts2020_attach);
/*
* We implement own regmap locking due to legacy DVB attach which uses frontend
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 2483f614d0e7..41dd9b6d3190 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -186,7 +186,7 @@ struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr, struct i2
fe->tuner_priv = priv;
return fe;
}
-EXPORT_SYMBOL(tua6100_attach);
+EXPORT_SYMBOL_GPL(tua6100_attach);
MODULE_DESCRIPTION("DVB tua6100 driver");
MODULE_AUTHOR("Andrew de Quincey");
diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
index 9df14d0be1c1..ee5620e731e9 100644
--- a/drivers/media/dvb-frontends/ves1820.c
+++ b/drivers/media/dvb-frontends/ves1820.c
@@ -434,4 +434,4 @@ MODULE_DESCRIPTION("VLSI VES1820 DVB-C Demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(ves1820_attach);
+EXPORT_SYMBOL_GPL(ves1820_attach);
diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
index b74727286302..c60e21d26b88 100644
--- a/drivers/media/dvb-frontends/ves1x93.c
+++ b/drivers/media/dvb-frontends/ves1x93.c
@@ -540,4 +540,4 @@ MODULE_DESCRIPTION("VLSI VES1x93 DVB-S Demodulator driver");
MODULE_AUTHOR("Ralph Metzler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(ves1x93_attach);
+EXPORT_SYMBOL_GPL(ves1x93_attach);
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index d392c7cce2ce..7ba575e9c55f 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -496,7 +496,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(zl10036_attach);
+EXPORT_SYMBOL_GPL(zl10036_attach);
module_param_named(debug, zl10036_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index 1335bf78d5b7..a3e4d219400c 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -295,7 +295,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(zl10039_attach);
+EXPORT_SYMBOL_GPL(zl10039_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 2a2cf20a73d6..8849d05475c2 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -665,4 +665,4 @@ MODULE_DESCRIPTION("Zarlink ZL10353 DVB-T demodulator driver");
MODULE_AUTHOR("Chris Pascoe");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(zl10353_attach);
+EXPORT_SYMBOL_GPL(zl10353_attach);
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 3e52a51982d7..110651e47831 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -1722,7 +1722,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
return state; /* Manu (DST is a card not a frontend) */
}
-EXPORT_SYMBOL(dst_attach);
+EXPORT_SYMBOL_GPL(dst_attach);
static const struct dvb_frontend_ops dst_dvbt_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index d234a0f404d6..a9cc6e7a57f9 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -668,7 +668,7 @@ struct dvb_device *dst_ca_attach(struct dst_state *dst, struct dvb_adapter *dvb_
return NULL;
}
-EXPORT_SYMBOL(dst_ca_attach);
+EXPORT_SYMBOL_GPL(dst_ca_attach);
MODULE_DESCRIPTION("DST DVB-S/T/C Combo CA driver");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
index 6868a0c4fc82..520ebd16b0c4 100644
--- a/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
+++ b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
@@ -112,7 +112,7 @@ struct dvb_frontend *ddbridge_dummy_fe_qam_attach(void)
state->frontend.demodulator_priv = state;
return &state->frontend;
}
-EXPORT_SYMBOL(ddbridge_dummy_fe_qam_attach);
+EXPORT_SYMBOL_GPL(ddbridge_dummy_fe_qam_attach);
static const struct dvb_frontend_ops ddbridge_dummy_fe_qam_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index eaa3bbc903d7..3d3b54be2955 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -499,7 +499,7 @@ struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(fc0011_attach);
+EXPORT_SYMBOL_GPL(fc0011_attach);
MODULE_DESCRIPTION("Fitipower FC0011 silicon tuner driver");
MODULE_AUTHOR("Michael Buesch <m@bues.ch>");
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index 4429d5e8c579..81e65acbdb17 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -495,7 +495,7 @@ err:
return fe;
}
-EXPORT_SYMBOL(fc0012_attach);
+EXPORT_SYMBOL_GPL(fc0012_attach);
MODULE_DESCRIPTION("Fitipower FC0012 silicon tuner driver");
MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index 29dd9b55ff33..1006a2798eef 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -608,7 +608,7 @@ struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(fc0013_attach);
+EXPORT_SYMBOL_GPL(fc0013_attach);
MODULE_DESCRIPTION("Fitipower FC0013 silicon tuner driver");
MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index 1c746bed51fe..1575ab94e1c8 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -410,7 +410,7 @@ struct dvb_frontend *max2165_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(max2165_attach);
+EXPORT_SYMBOL_GPL(max2165_attach);
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
MODULE_DESCRIPTION("Maxim MAX2165 silicon tuner driver");
diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
index 0c9161516abd..ed8bdf7ebd99 100644
--- a/drivers/media/tuners/mc44s803.c
+++ b/drivers/media/tuners/mc44s803.c
@@ -356,7 +356,7 @@ error:
kfree(priv);
return NULL;
}
-EXPORT_SYMBOL(mc44s803_attach);
+EXPORT_SYMBOL_GPL(mc44s803_attach);
MODULE_AUTHOR("Jochen Friedrich");
MODULE_DESCRIPTION("Freescale MC44S803 silicon tuner driver");
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 0278a9f0aeef..4205ed4cf467 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -440,7 +440,7 @@ struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(mt2060_attach);
+EXPORT_SYMBOL_GPL(mt2060_attach);
static int mt2060_probe(struct i2c_client *client)
{
diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
index 37f50ff6c0bd..eebc06088341 100644
--- a/drivers/media/tuners/mt2131.c
+++ b/drivers/media/tuners/mt2131.c
@@ -274,7 +274,7 @@ struct dvb_frontend * mt2131_attach(struct dvb_frontend *fe,
fe->tuner_priv = priv;
return fe;
}
-EXPORT_SYMBOL(mt2131_attach);
+EXPORT_SYMBOL_GPL(mt2131_attach);
MODULE_AUTHOR("Steven Toth");
MODULE_DESCRIPTION("Microtune MT2131 silicon tuner driver");
diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c
index 6136f20fa9b7..2e92885a6bcb 100644
--- a/drivers/media/tuners/mt2266.c
+++ b/drivers/media/tuners/mt2266.c
@@ -336,7 +336,7 @@ struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter
mt2266_calibrate(priv);
return fe;
}
-EXPORT_SYMBOL(mt2266_attach);
+EXPORT_SYMBOL_GPL(mt2266_attach);
MODULE_AUTHOR("Olivier DANET");
MODULE_DESCRIPTION("Microtune MT2266 silicon tuner driver");
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index 06dfab9fb8cb..d9bfa257a005 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -4120,7 +4120,7 @@ struct dvb_frontend *mxl5005s_attach(struct dvb_frontend *fe,
fe->tuner_priv = state;
return fe;
}
-EXPORT_SYMBOL(mxl5005s_attach);
+EXPORT_SYMBOL_GPL(mxl5005s_attach);
MODULE_DESCRIPTION("MaxLinear MXL5005S silicon tuner driver");
MODULE_AUTHOR("Steven Toth");
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index a7b19863f489..48fc79cd4027 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -441,7 +441,7 @@ struct dvb_frontend * qt1010_attach(struct dvb_frontend *fe,
fe->tuner_priv = priv;
return fe;
}
-EXPORT_SYMBOL(qt1010_attach);
+EXPORT_SYMBOL_GPL(qt1010_attach);
MODULE_DESCRIPTION("Quantek QT1010 silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index 4ed94646116f..7d8d84dcb245 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -336,7 +336,7 @@ struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(tda18218_attach);
+EXPORT_SYMBOL_GPL(tda18218_attach);
MODULE_DESCRIPTION("NXP TDA18218HN silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/tuners/xc2028.c b/drivers/media/tuners/xc2028.c
index 69c2e1b99bf1..5a967edceca9 100644
--- a/drivers/media/tuners/xc2028.c
+++ b/drivers/media/tuners/xc2028.c
@@ -1512,7 +1512,7 @@ fail:
return NULL;
}
-EXPORT_SYMBOL(xc2028_attach);
+EXPORT_SYMBOL_GPL(xc2028_attach);
MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver");
MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>");
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index d59b4ab77430..57ded9ff3f04 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1742,7 +1742,7 @@ fail2:
xc4000_release(fe);
return NULL;
}
-EXPORT_SYMBOL(xc4000_attach);
+EXPORT_SYMBOL_GPL(xc4000_attach);
MODULE_AUTHOR("Steven Toth, Davide Ferri");
MODULE_DESCRIPTION("Xceive xc4000 silicon tuner driver");
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 7b7d9fe4f945..2182e5b7b606 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -1460,7 +1460,7 @@ fail:
xc5000_release(fe);
return NULL;
}
-EXPORT_SYMBOL(xc5000_attach);
+EXPORT_SYMBOL_GPL(xc5000_attach);
MODULE_AUTHOR("Steven Toth");
MODULE_DESCRIPTION("Xceive xc5000 silicon tuner driver");
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 31f664ee4d77..b940dcd3ace6 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -756,8 +756,6 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- macb_set_tx_clk(bp, speed);
-
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
*/
@@ -777,6 +775,9 @@ static void macb_mac_link_up(struct phylink_config *config,
spin_unlock_irqrestore(&bp->lock, flags);
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+ macb_set_tx_clk(bp, speed);
+
/* Enable Rx and Tx; Enable PTP unicast */
ctrl = macb_readl(bp, NCR);
if (gem_has_ptp(bp))
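
The macb hunk above moves the macb_set_tx_clk() call out of the region that runs under bp->lock and skips it on EMAC-only variants; the likely motivation is that changing a clock rate may sleep, which is not permitted while a spinlock is held with interrupts disabled. A hedged sketch of the resulting ordering, using the field and helper names from the hunk itself:

spin_lock_irqsave(&bp->lock, flags);
/* ... update the control register bits under the lock ... */
spin_unlock_irqrestore(&bp->lock, flags);

if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
	macb_set_tx_clk(bp, speed);	/* may sleep; called only after unlocking */
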
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 13ba9c74bd84..76b34cee1da3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3827,8 +3827,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
}
/* only call pci_enable_sriov() if no VFs are allocated already */
- if (!old_vfs)
+ if (!old_vfs) {
err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+ }
goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 0310af851086..9339edbd9082 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -979,6 +979,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ u32 aflags = adapter->flags;
bool is_l2 = false;
u32 regval;
@@ -996,20 +997,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1023,8 +1024,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -1035,7 +1036,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
if (hw->mac.type >= ixgbe_mac_X550) {
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
break;
}
fallthrough;
@@ -1046,8 +1047,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -1079,8 +1078,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_TSYNCRXCTL_TYPE_ALL |
IXGBE_TSYNCRXCTL_TSIP_UT_EN;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
is_l2 = true;
break;
default:
@@ -1113,6 +1112,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_WRITE_FLUSH(hw);
+ /* configure adapter flags only when HW is actually configured */
+ adapter->flags = aflags;
+
/* clear TX/RX time stamp registers, just to be sure */
ixgbe_ptp_clear_tx_timestamp(adapter);
IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
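
The ixgbe change above stages all flag updates in a local aflags copy and writes adapter->flags only after the timestamp registers have been programmed, so the early -ERANGE return paths no longer leave the software flags disagreeing with the hardware state. A simplified, standalone sketch of that commit-after-success pattern; the struct, flag and helper names are hypothetical:

struct example_adapter {
	unsigned int flags;
};

#define EXAMPLE_FLAG_RX_TS	0x1

static int example_set_mode(struct example_adapter *ad, int mode)
{
	unsigned int flags = ad->flags;		/* work on a local copy */

	if (mode == 1)
		flags |= EXAMPLE_FLAG_RX_TS;
	else if (mode == 0)
		flags &= ~EXAMPLE_FLAG_RX_TS;
	else
		return -ERANGE;			/* ad->flags left untouched */

	example_program_hw(ad, flags);		/* hypothetical HW write */
	ad->flags = flags;			/* commit only after HW is configured */
	return 0;
}
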
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 6083b1c8e4fb..ea9186178091 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -799,6 +799,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct rswitch_private *priv;
struct rswitch_device *rdev;
+ unsigned long flags;
int quota = budget;
rdev = netdev_priv(ndev);
@@ -816,10 +817,12 @@ retry:
netif_wake_subqueue(ndev, 0);
- napi_complete(napi);
-
- rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
- rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ if (napi_complete_done(napi, budget - quota)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+ rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
out:
return budget - quota;
@@ -835,8 +838,10 @@ static void rswitch_queue_interrupt(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev);
if (napi_schedule_prep(&rdev->napi)) {
+ spin_lock(&rdev->priv->lock);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ spin_unlock(&rdev->priv->lock);
__napi_schedule(&rdev->napi);
}
}
@@ -1440,14 +1445,17 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
static int rswitch_open(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
+ unsigned long flags;
phy_start(ndev->phydev);
napi_enable(&rdev->napi);
netif_start_queue(ndev);
+ spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
+ spin_unlock_irqrestore(&rdev->priv->lock, flags);
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
@@ -1461,6 +1469,7 @@ static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ unsigned long flags;
netif_tx_stop_all_queues(ndev);
bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
@@ -1476,8 +1485,10 @@ static int rswitch_stop(struct net_device *ndev)
kfree(ts_info);
}
+ spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ spin_unlock_irqrestore(&rdev->priv->lock, flags);
phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
@@ -1887,6 +1898,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ spin_lock_init(&priv->lock);
attr = soc_device_match(rswitch_soc_no_speed_change);
if (attr)
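
The rswitch changes above add priv->lock around the data-IRQ enable/disable registers and only re-arm them when napi_complete_done() accepts the completion, so a poll that gets rescheduled does not re-enable interrupts prematurely. A hedged sketch of that poll-side pattern with generic names (struct my_priv, my_process_rx() and my_enable_irq() are stand-ins, not rswitch symbols):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_priv {
	struct napi_struct napi;
	spinlock_t lock;
};

static int my_process_rx(struct my_priv *priv, int budget) { return 0; }	/* stand-in */
static void my_enable_irq(struct my_priv *priv, bool enable) { }		/* stand-in */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	unsigned long flags;
	int done = my_process_rx(priv, budget);

	/* Re-enable interrupts only if NAPI really completed, and serialize
	 * the enable register with the same lock the IRQ path takes. */
	if (done < budget && napi_complete_done(napi, done)) {
		spin_lock_irqsave(&priv->lock, flags);
		my_enable_irq(priv, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return done;
}
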
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 54f397effbc6..f0c16a37ea55 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -1011,6 +1011,8 @@ struct rswitch_private {
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
+ spinlock_t lock; /* lock interrupt registers' control */
+
bool etha_no_runtime_change;
bool gwca_halt;
};
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 9c6f4f83f22b..0deefd1573cf 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1446,6 +1446,8 @@ static int veth_open(struct net_device *dev)
netif_carrier_on(peer);
}
+ veth_set_xdp_features(dev);
+
return 0;
}
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 4940b6301d83..d687e8c2cc78 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -941,13 +941,10 @@ static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
ndev->debugfs_dir =
debugfs_create_dir(pci_name(ndev->ntb.pdev),
debugfs_dir);
- if (IS_ERR(ndev->debugfs_dir))
- ndev->debugfs_info = NULL;
- else
- ndev->debugfs_info =
- debugfs_create_file("info", S_IRUSR,
- ndev->debugfs_dir, ndev,
- &amd_ntb_debugfs_info);
+ ndev->debugfs_info =
+ debugfs_create_file("info", S_IRUSR,
+ ndev->debugfs_dir, ndev,
+ &amd_ntb_debugfs_info);
}
}
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 2abd2235bbca..f9e7847a378e 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -909,7 +909,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
return 0;
}
-static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp)
{
qp->link_is_up = false;
qp->active = false;
@@ -932,6 +932,13 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
qp->tx_async = 0;
}
+static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+{
+ ntb_qp_link_context_reset(qp);
+ if (qp->remote_rx_info)
+ qp->remote_rx_info->entry = qp->rx_max_entry - 1;
+}
+
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
struct ntb_transport_ctx *nt = qp->transport;
@@ -1174,7 +1181,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp->ndev = nt->ndev;
qp->client_ready = false;
qp->event_handler = NULL;
- ntb_qp_link_down_reset(qp);
+ ntb_qp_link_context_reset(qp);
if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
@@ -1894,7 +1901,7 @@ err:
static int ntb_process_tx(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry)
{
- if (qp->tx_index == qp->remote_rx_info->entry) {
+ if (!ntb_transport_tx_free_entry(qp)) {
qp->tx_ring_full++;
return -EAGAIN;
}
@@ -2276,9 +2283,13 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
struct ntb_queue_entry *entry;
int rc;
- if (!qp || !qp->link_is_up || !len)
+ if (!qp || !len)
return -EINVAL;
+ /* If the qp link is down already, just ignore. */
+ if (!qp->link_is_up)
+ return 0;
+
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
if (!entry) {
qp->tx_err_no_buf++;
@@ -2418,7 +2429,7 @@ unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
unsigned int head = qp->tx_index;
unsigned int tail = qp->remote_rx_info->entry;
- return tail > head ? tail - head : qp->tx_max_entry + tail - head;
+ return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
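
The ntb_transport changes above replace the open-coded full-ring test in ntb_process_tx() with ntb_transport_tx_free_entry() and flip its comparison to tail >= head, so head == tail now yields zero free entries (the same "ring full" condition the old tx_index == remote_rx_info->entry check expressed) instead of wrapping and claiming the whole ring is free; ntb_qp_link_down_reset() seeds remote_rx_info->entry with rx_max_entry - 1 to match. A worked sketch of the arithmetic, with 'entries' standing in for tx_max_entry:

/* Illustrative ring-accounting sketch. 'head' is the producer index,
 * 'tail' is the index reported back by the consumer; in this scheme
 * head == tail means the ring is full and must yield 0 free slots.
 * With the old 'tail > head' test, head == tail fell into the wrap
 * branch and wrongly reported 'entries' free slots. */
static unsigned int ring_free_entries(unsigned int head, unsigned int tail,
				      unsigned int entries)
{
	return tail >= head ? tail - head : entries + tail - head;
}

/* Example, entries = 8:  head = 3, tail = 3  -> 0 free (full)
 *                        head = 3, tail = 5  -> 2 free
 *                        head = 6, tail = 2  -> 4 free (wrapped) */
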
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 65e1e5cf1b29..553f1f46bc66 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -1355,7 +1355,7 @@ static void perf_setup_dbgfs(struct perf_ctx *perf)
struct pci_dev *pdev = perf->ntb->pdev;
perf->dbgfs_dir = debugfs_create_dir(pci_name(pdev), perf_dbgfs_topdir);
- if (!perf->dbgfs_dir) {
+ if (IS_ERR(perf->dbgfs_dir)) {
dev_warn(&perf->ntb->dev, "DebugFS unsupported\n");
return;
}
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index eeeb4b1c97d2..641cb7e05a47 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -370,16 +370,9 @@ static ssize_t tool_fn_write(struct tool_ctx *tc,
if (*offp)
return 0;
- buf = kmalloc(size + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, ubuf, size)) {
- kfree(buf);
- return -EFAULT;
- }
-
- buf[size] = 0;
+ buf = memdup_user_nul(ubuf, size);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
n = sscanf(buf, "%c %lli", &cmd, &bits);
@@ -1495,8 +1488,6 @@ static void tool_setup_dbgfs(struct tool_ctx *tc)
tc->dbgfs_dir = debugfs_create_dir(dev_name(&tc->ntb->dev),
tool_dbgfs_topdir);
- if (!tc->dbgfs_dir)
- return;
debugfs_create_file("port", 0600, tc->dbgfs_dir,
tc, &tool_port_fops);
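
tool_fn_write() above collapses the kmalloc()/copy_from_user()/manual NUL-termination sequence into a single memdup_user_nul() call, which returns an ERR_PTR on failure. A minimal sketch of the same conversion in a generic debugfs-style write handler (my_debugfs_write and the "%d" parsing are hypothetical):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t my_debugfs_write(struct file *filp, const char __user *ubuf,
				size_t size, loff_t *offp)
{
	char *buf;
	int val;

	/* Copies 'size' bytes from user space and appends a NUL for us. */
	buf = memdup_user_nul(ubuf, size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (sscanf(buf, "%d", &val) != 1) {
		kfree(buf);
		return -EINVAL;
	}
	kfree(buf);

	return size;
}
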
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 509a4072d50a..9ce0d20a6c58 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -214,7 +214,7 @@ struct ioa_registers {
struct ioc {
struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */
u8 *res_map; /* resource map, bit == pdir entry */
- u64 *pdir_base; /* physical base address */
+ __le64 *pdir_base; /* physical base address */
u32 pdir_size; /* bytes, function of IOV Space size */
u32 res_hint; /* next available IOVP -
circular search */
@@ -339,7 +339,7 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
BUG_ON(pages_needed == 0);
BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
- DBG_RES("%s() size: %d pages_needed %d\n",
+ DBG_RES("%s() size: %zu pages_needed %d\n",
__func__, size, pages_needed);
/*
@@ -427,7 +427,7 @@ ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
BUG_ON(pages_mapped > BITS_PER_LONG);
- DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
+ DBG_RES("%s(): res_idx: %d pages_mapped %lu\n",
__func__, res_idx, pages_mapped);
#ifdef CCIO_COLLECT_STATS
@@ -543,7 +543,7 @@ static u32 hint_lookup[] = {
* index are bits 12:19 of the value returned by LCI.
*/
static void
-ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
+ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hints)
{
register unsigned long pa;
@@ -719,7 +719,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
- u64 *pdir_start;
+ __le64 *pdir_start;
unsigned long hint = hint_lookup[(int)direction];
BUG_ON(!dev);
@@ -746,8 +746,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
pdir_start = &(ioc->pdir_base[idx]);
- DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
- __func__, addr, (long)iovp | offset, size);
+ DBG_RUN("%s() %px -> %#lx size: %zu\n",
+ __func__, addr, (long)(iovp | offset), size);
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
@@ -805,7 +805,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
return;
}
- DBG_RUN("%s() iovp 0x%lx/%x\n",
+ DBG_RUN("%s() iovp %#lx/%zx\n",
__func__, (long)iova, size);
iova ^= offset; /* clear offset bits */
@@ -1283,7 +1283,7 @@ ccio_ioc_init(struct ioc *ioc)
iova_space_size>>20,
iov_order + PAGE_SHIFT);
- ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
+ ioc->pdir_base = (__le64 *)__get_free_pages(GFP_KERNEL,
get_order(ioc->pdir_size));
if(NULL == ioc->pdir_base) {
panic("%s() could not allocate I/O Page Table\n", __func__);
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 0905be256de0..c43f1a212a5c 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -14,13 +14,13 @@
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long hint,
- void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
+ void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
unsigned long))
{
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
unsigned int n_mappings = 0;
unsigned long dma_offset = 0, dma_len = 0;
- u64 *pdirp = NULL;
+ __le64 *pdirp = NULL;
/* Horrible hack. For efficiency's sake, dma_sg starts one
* entry below the true start (it is immediately incremented
@@ -31,8 +31,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long vaddr;
long size;
- DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
- (unsigned long)sg_dma_address(startsg), cnt,
+ DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
+ (unsigned long)sg_dma_address(startsg),
sg_virt(startsg), startsg->length
);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index a7df764f1a72..a4011461189b 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -202,9 +202,9 @@ static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 va
static DEFINE_SPINLOCK(iosapic_lock);
-static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
+static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data)
{
- __raw_writel(data, addr);
+ __raw_writel((__force u32)data, addr);
}
/*
diff --git a/drivers/parisc/iosapic_private.h b/drivers/parisc/iosapic_private.h
index 73ecc657ad95..bd8ff40162b4 100644
--- a/drivers/parisc/iosapic_private.h
+++ b/drivers/parisc/iosapic_private.h
@@ -118,8 +118,8 @@ struct iosapic_irt {
struct vector_info {
struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */
struct irt_entry *irte; /* IRT entry */
- u32 __iomem *eoi_addr; /* precalculate EOI reg address */
- u32 eoi_data; /* IA64: ? PA: swapped txn_data */
+ __le32 __iomem *eoi_addr; /* precalculate EOI reg address */
+ __le32 eoi_data; /* IA64: ? PA: swapped txn_data */
int txn_irq; /* virtual IRQ number for processor */
ulong txn_addr; /* IA64: id_eid PA: partial HPA */
u32 txn_data; /* CPU interrupt bit */
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index f6b510675318..05e7103d1d40 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -46,8 +46,6 @@
#include <linux/module.h>
#include <asm/ropes.h>
-#include <asm/mckinley.h> /* for proc_mckinley_root */
-#include <asm/runway.h> /* for proc_runway_root */
#include <asm/page.h> /* for PAGE0 */
#include <asm/pdc.h> /* for PDC_MODEL_* */
#include <asm/pdcpat.h> /* for is_pdc_pat() */
@@ -122,7 +120,7 @@ MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif
static struct proc_dir_entry *proc_runway_root __ro_after_init;
-struct proc_dir_entry *proc_mckinley_root __ro_after_init;
+static struct proc_dir_entry *proc_mckinley_root __ro_after_init;
/************************************
** SBA register read and write support
@@ -204,7 +202,7 @@ static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
/* start printing from lowest pde in rval */
- u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
+ __le64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
uint rcnt;
@@ -571,7 +569,7 @@ typedef unsigned long space_t;
*/
static void
-sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
+sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hint)
{
u64 pa; /* physical address */
@@ -615,7 +613,7 @@ static void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
u32 iovp = (u32) SBA_IOVP(ioc,iova);
- u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
+ __le64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
#ifdef ASSERT_PDIR_SANITY
/* Assert first pdir entry is set.
@@ -716,7 +714,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
- u64 *pdir_start;
+ __le64 *pdir_start;
int pide;
ioc = GET_IOC(dev);
@@ -1434,7 +1432,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
- DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
+ DBG_INIT("%s() hpa %px mem %ldMB IOV %dMB (%d bits)\n",
__func__,
ioc->ioc_hpa,
(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
@@ -1471,7 +1469,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
- DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
+ DBG_INIT("%s() IOV base %#lx mask %#0lx\n",
__func__, ioc->ibase, ioc->imask);
/*
@@ -1583,7 +1581,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
if (!IS_PLUTO(sba_dev->dev)) {
ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
- DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
+ DBG_INIT("%s() hpa %px ioc_ctl 0x%Lx ->",
__func__, sba_dev->sba_hpa, ioc_ctl);
ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
@@ -1668,14 +1666,14 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
/* flush out the last writes */
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
- DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
+ DBG_INIT(" ioc[%d] ROPE_CFG %#lx ROPE_DBG %lx\n",
i,
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
);
- DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
+ DBG_INIT(" STATUS_CONTROL %#lx FLUSH_CTRL %#lx\n",
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
);
if (IS_PLUTO(sba_dev->dev)) {
@@ -1739,7 +1737,7 @@ sba_common_init(struct sba_device *sba_dev)
#ifdef ASSERT_PDIR_SANITY
/* Mark first bit busy - ie no IOVA 0 */
sba_dev->ioc[i].res_map[0] = 0x80;
- sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
+ sba_dev->ioc[i].pdir_base[0] = (__force __le64) 0xeeffc0addbba0080ULL;
#endif
/* Third (and last) part of PIRANHA BUG */
@@ -1899,9 +1897,7 @@ static int __init sba_driver_callback(struct parisc_device *dev)
int i;
char *version;
void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *root;
-#endif
+ struct proc_dir_entry *root __maybe_unused;
sba_dump_ranges(sba_addr);
@@ -1967,7 +1963,6 @@ static int __init sba_driver_callback(struct parisc_device *dev)
hppa_dma_ops = &sba_ops;
-#ifdef CONFIG_PROC_FS
switch (dev->id.hversion) {
case PLUTO_MCKINLEY_PORT:
if (!proc_mckinley_root)
@@ -1985,7 +1980,6 @@ static int __init sba_driver_callback(struct parisc_device *dev)
proc_create_single("sba_iommu", 0, root, sba_proc_info);
proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
-#endif
return 0;
}
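
The ccio and sba hunks above re-type the I/O pdir entries and the iosapic EOI register as __le64/__le32, so sparse can verify that everything stored there is little-endian; raw constants then need either cpu_to_le64() or an explicit (__force) cast, as in the PIRANHA sanity pattern. A small sketch of an annotated store under those assumptions (my_io_pdir_store is hypothetical, not the ccio/sba byte-assembly code):

#include <asm/byteorder.h>
#include <linux/types.h>

/* Store one little-endian I/O page-directory entry.  cpu_to_le64() does
 * the swap on big-endian parisc and is a no-op elsewhere; sparse flags
 * any plain u64 assigned to the __le64 slot by accident. */
static void my_io_pdir_store(__le64 *pdir_ptr, u64 pa, u64 hints)
{
	*pdir_ptr = cpu_to_le64(pa | hints);
}
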
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 49bd09c7dd0a..e9ae66cc4189 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -196,7 +196,7 @@ config PCI_HYPERV
config PCI_DYNAMIC_OF_NODES
bool "Create Device tree nodes for PCI devices"
- depends on OF
+ depends on OF_IRQ
select OF_DYNAMIC
help
This option enables support for generating device tree nodes for some
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ab2a4a3a4c06..795534589b98 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -997,6 +997,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
res = window->res;
if (!res->flags && !res->start && !res->end) {
release_resource(res);
+ resource_list_destroy_entry(window);
continue;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5de09d2eb014..eeec1d6f9023 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3726,7 +3726,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
*/
static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
{
- if ((dev->device & 0xffc0) == 0x2340 || dev->device == 0x1eb8)
+ if ((dev->device & 0xffc0) == 0x2340)
quirk_no_bus_reset(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index 0a8f597e695b..365d964b0f6a 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -25,7 +25,7 @@
#include "../cxl/pmu.h"
#define CXL_PMU_CAP_REG 0x0
-#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(4, 0)
+#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(5, 0)
#define CXL_PMU_CAP_COUNTER_WIDTH_MSK GENMASK_ULL(15, 8)
#define CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK GENMASK_ULL(24, 20)
#define CXL_PMU_CAP_FILTERS_SUP_MSK GENMASK_ULL(39, 32)
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index 382793e73a60..f7dfa0e785fd 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -60,6 +60,7 @@ config MLXBF_BOOTCTL
tristate "Mellanox BlueField Firmware Boot Control driver"
depends on ARM64
depends on ACPI
+ depends on NET
help
The Mellanox BlueField firmware implements functionality to
request swapping the primary and alternate eMMC boot partition,
@@ -80,8 +81,8 @@ config MLXBF_PMC
config NVSW_SN2201
tristate "Nvidia SN2201 platform driver support"
- depends on HWMON
- depends on I2C
+ depends on HWMON && I2C
+ depends on ACPI || COMPILE_TEST
select REGMAP_I2C
help
This driver provides support for the Nvidia SN2201 platform.
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index be967d797c28..2d4bbe99959e 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -191,6 +191,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
+ { 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
@@ -214,6 +215,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
+ { 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
@@ -246,6 +248,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
+ { 0x0, "DISABLE" },
{ 0x100, "ECC_SINGLE_ERROR_CNT" },
{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
{ 0x114, "SERR_INJ" },
@@ -258,6 +261,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
+ { 0x0, "DISABLE" },
{ 0xc0, "RXREQ_MSS" },
{ 0xc1, "RXDAT_MSS" },
{ 0xc2, "TXRSP_MSS" },
@@ -265,6 +269,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
+ { 0x0, "DISABLE" },
{ 0x45, "HNF_REQUESTS" },
{ 0x46, "HNF_REJECTS" },
{ 0x47, "ALL_BUSY" },
@@ -323,6 +328,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
+ { 0x0, "DISABLE" },
{ 0x12, "CDN_REQ" },
{ 0x13, "DDN_REQ" },
{ 0x14, "NDN_REQ" },
@@ -892,7 +898,7 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
uint64_t *result)
{
uint32_t perfcfg_offset, perfval_offset;
- uint64_t perfmon_cfg, perfevt, perfctl;
+ uint64_t perfmon_cfg, perfevt;
if (cnt_num >= pmc->block[blk_num].counters)
return -EINVAL;
@@ -906,25 +912,6 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
/* Set counter in "read" mode */
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
- MLXBF_PMC_PERFCTL);
- perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
- perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
-
- if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
- MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
- return -EFAULT;
-
- /* Check if the counter is enabled */
-
- if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
- MLXBF_PMC_READ_REG_64, &perfctl))
- return -EFAULT;
-
- if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
- return -EINVAL;
-
- /* Set counter in "read" mode */
- perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFEVT);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
@@ -1008,7 +995,7 @@ static ssize_t mlxbf_pmc_counter_show(struct device *dev,
} else
return -EINVAL;
- return sprintf(buf, "0x%llx\n", value);
+ return sysfs_emit(buf, "0x%llx\n", value);
}
/* Store function for "counter" sysfs files */
@@ -1078,13 +1065,13 @@ static ssize_t mlxbf_pmc_event_show(struct device *dev,
err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
if (err)
- return sprintf(buf, "No event being monitored\n");
+ return sysfs_emit(buf, "No event being monitored\n");
evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
if (!evt_name)
return -EINVAL;
- return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
+ return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
}
/* Store function for "event" sysfs files */
@@ -1139,9 +1126,9 @@ static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
return -EINVAL;
for (i = 0, buf[0] = '\0'; i < size; ++i) {
- len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
- events[i].evt_name);
- if (len > PAGE_SIZE)
+ len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
+ events[i].evt_num, events[i].evt_name);
+ if (len >= PAGE_SIZE)
break;
strcat(buf, e_info);
ret = len;
@@ -1168,7 +1155,7 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,
value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
/* Store function for "enable" sysfs files - only for l3cache */
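
The mlxbf-pmc hunks above convert the single-value show() callbacks from sprintf() to sysfs_emit() and bound the event-list loop with snprintf() checked against PAGE_SIZE. An alternative sketch of a bounded list in a show() callback using sysfs_emit_at(), which tracks the offset and clamps at the one-page sysfs buffer internally (the two-entry event table is made up for the example):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t my_event_list_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	static const struct { int num; const char *name; } events[] = {
		{ 0x0,  "DISABLE" },
		{ 0xa0, "TPIO_DATA_BEAT" },
	};
	int i, len = 0;

	/* sysfs_emit_at() refuses to write past PAGE_SIZE. */
	for (i = 0; i < ARRAY_SIZE(events); i++)
		len += sysfs_emit_at(buf, len, "0x%x: %s\n",
				     events[i].num, events[i].name);

	return len;
}
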
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index b600b77d91ef..f3696a54a2bd 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -59,6 +59,7 @@ struct mlxbf_tmfifo;
* @vq: pointer to the virtio virtqueue
* @desc: current descriptor of the pending packet
* @desc_head: head descriptor of the pending packet
+ * @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
* @pkt_len: total length of the pending packet
@@ -75,6 +76,7 @@ struct mlxbf_tmfifo_vring {
struct virtqueue *vq;
struct vring_desc *desc;
struct vring_desc *desc_head;
+ struct vring_desc drop_desc;
int cur_len;
int rem_len;
u32 pkt_len;
@@ -86,6 +88,14 @@ struct mlxbf_tmfifo_vring {
struct mlxbf_tmfifo *fifo;
};
+/* Check whether vring is in drop mode. */
+#define IS_VRING_DROP(_r) ({ \
+ typeof(_r) (r) = (_r); \
+ (r->desc_head == &r->drop_desc ? true : false); })
+
+/* A stub length to drop maximum length packet. */
+#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
+
/* Interrupt types. */
enum {
MLXBF_TM_RX_LWM_IRQ,
@@ -214,7 +224,7 @@ static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
/* Maximum L2 header length. */
-#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
+#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)
/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
@@ -262,6 +272,7 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
vring->align = SMP_CACHE_BYTES;
vring->index = i;
vring->vdev_id = tm_vdev->vdev.id.device;
+ vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
dev = &tm_vdev->vdev.dev;
size = vring_size(vring->num, vring->align);
@@ -367,7 +378,7 @@ static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
return len;
}
-static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
+static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc_head;
u32 len = 0;
@@ -596,19 +607,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data, sizeof(u64));
- else
- memcpy(&data, addr + vring->cur_len, sizeof(u64));
+ if (!IS_VRING_DROP(vring)) {
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ sizeof(u64));
+ else
+ memcpy(&data, addr + vring->cur_len,
+ sizeof(u64));
+ }
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data,
- len - vring->cur_len);
- else
- memcpy(&data, addr + vring->cur_len,
- len - vring->cur_len);
+ if (!IS_VRING_DROP(vring)) {
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ len - vring->cur_len);
+ else
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
+ }
vring->cur_len = len;
}
@@ -625,13 +642,14 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
* flag is set.
*/
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
- struct vring_desc *desc,
+ struct vring_desc **desc,
bool is_rx, bool *vring_change)
{
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_net_config *config;
struct mlxbf_tmfifo_msg_hdr hdr;
int vdev_id, hdr_len;
+ bool drop_rx = false;
/* Read/Write packet header. */
if (is_rx) {
@@ -651,8 +669,8 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
if (ntohs(hdr.len) >
__virtio16_to_cpu(virtio_legacy_is_little_endian(),
config->mtu) +
- MLXBF_TMFIFO_NET_L2_OVERHEAD)
- return;
+ MLXBF_TMFIFO_NET_L2_OVERHEAD)
+ drop_rx = true;
} else {
vdev_id = VIRTIO_ID_CONSOLE;
hdr_len = 0;
@@ -667,16 +685,25 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
if (!tm_dev2)
return;
- vring->desc = desc;
+ vring->desc = *desc;
vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
*vring_change = true;
}
+
+ if (drop_rx && !IS_VRING_DROP(vring)) {
+ if (vring->desc_head)
+ mlxbf_tmfifo_release_pkt(vring);
+ *desc = &vring->drop_desc;
+ vring->desc_head = *desc;
+ vring->desc = *desc;
+ }
+
vring->pkt_len = ntohs(hdr.len) + hdr_len;
} else {
/* Network virtio has an extra header. */
hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
sizeof(struct virtio_net_hdr) : 0;
- vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
hdr.len = htons(vring->pkt_len - hdr_len);
@@ -709,15 +736,23 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
/* Get the descriptor of the next packet. */
if (!vring->desc) {
desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
- if (!desc)
- return false;
+ if (!desc) {
+ /* Drop next Rx packet to avoid stuck. */
+ if (is_rx) {
+ desc = &vring->drop_desc;
+ vring->desc_head = desc;
+ vring->desc = desc;
+ } else {
+ return false;
+ }
+ }
} else {
desc = vring->desc;
}
/* Beginning of a packet. Start to Rx/Tx packet header. */
if (vring->pkt_len == 0) {
- mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
+ mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
(*avail)--;
/* Return if new packet is for another ring. */
@@ -743,17 +778,24 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
vring->rem_len -= len;
/* Get the next desc on the chain. */
- if (vring->rem_len > 0 &&
+ if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
goto mlxbf_tmfifo_desc_done;
}
- /* Done and release the pending packet. */
- mlxbf_tmfifo_release_pending_pkt(vring);
+ /* Done and release the packet. */
desc = NULL;
fifo->vring[is_rx] = NULL;
+ if (!IS_VRING_DROP(vring)) {
+ mlxbf_tmfifo_release_pkt(vring);
+ } else {
+ vring->pkt_len = 0;
+ vring->desc_head = NULL;
+ vring->desc = NULL;
+ return false;
+ }
/*
* Make sure the load/store are in order before
@@ -933,7 +975,7 @@ static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
/* Release the pending packet. */
if (vring->desc)
- mlxbf_tmfifo_release_pending_pkt(vring);
+ mlxbf_tmfifo_release_pkt(vring);
vq = vring->vq;
if (vq) {
vring->vq = NULL;
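
The mlxbf-tmfifo rework above gives every vring a drop_desc: when no real Rx descriptor is available, or when an incoming frame exceeds mtu + MLXBF_TMFIFO_NET_L2_OVERHEAD, the ring switches into drop mode and the FIFO words are still consumed but copied nowhere, so a stuck or misbehaving ring cannot stall the shared FIFO. A compact sketch of that drop-mode idea (struct my_vring and the helpers are stand-ins):

#include <linux/string.h>
#include <linux/types.h>
#include <linux/virtio_ring.h>

struct my_vring {
	struct vring_desc *desc_head;	/* head of the packet in flight */
	struct vring_desc drop_desc;	/* dummy descriptor for drop mode */
};

static bool my_vring_in_drop_mode(struct my_vring *r)
{
	return r->desc_head == &r->drop_desc;
}

static void my_rx_word(struct my_vring *r, void *dst, size_t off, u64 data)
{
	/* In drop mode the word has already been pulled from the FIFO;
	 * it is simply not copied into any buffer. */
	if (!my_vring_in_drop_mode(r))
		memcpy(dst + off, &data, sizeof(data));
}
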
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index fdf7da06af30..d85d895fee89 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -480,6 +480,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUS ROG FLOW X16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GV601V"),
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUS VivoBook E410MA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/genpd/Makefile b/drivers/pmdomain/Makefile
index 666753676e5c..666753676e5c 100644
--- a/drivers/genpd/Makefile
+++ b/drivers/pmdomain/Makefile
diff --git a/drivers/genpd/actions/Makefile b/drivers/pmdomain/actions/Makefile
index 7e8aa473d12d..7e8aa473d12d 100644
--- a/drivers/genpd/actions/Makefile
+++ b/drivers/pmdomain/actions/Makefile
diff --git a/drivers/genpd/actions/owl-sps-helper.c b/drivers/pmdomain/actions/owl-sps-helper.c
index e3f36603dd53..e3f36603dd53 100644
--- a/drivers/genpd/actions/owl-sps-helper.c
+++ b/drivers/pmdomain/actions/owl-sps-helper.c
diff --git a/drivers/genpd/actions/owl-sps.c b/drivers/pmdomain/actions/owl-sps.c
index 73a9e0bb7e8e..73a9e0bb7e8e 100644
--- a/drivers/genpd/actions/owl-sps.c
+++ b/drivers/pmdomain/actions/owl-sps.c
diff --git a/drivers/genpd/amlogic/Makefile b/drivers/pmdomain/amlogic/Makefile
index 3d58abd574f9..3d58abd574f9 100644
--- a/drivers/genpd/amlogic/Makefile
+++ b/drivers/pmdomain/amlogic/Makefile
diff --git a/drivers/genpd/amlogic/meson-ee-pwrc.c b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
index cfb796d40d9d..cfb796d40d9d 100644
--- a/drivers/genpd/amlogic/meson-ee-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
diff --git a/drivers/genpd/amlogic/meson-gx-pwrc-vpu.c b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
index 33df520eab95..33df520eab95 100644
--- a/drivers/genpd/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
diff --git a/drivers/genpd/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
index 89c881c56cd7..89c881c56cd7 100644
--- a/drivers/genpd/amlogic/meson-secure-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
diff --git a/drivers/genpd/apple/Makefile b/drivers/pmdomain/apple/Makefile
index 53665af630be..53665af630be 100644
--- a/drivers/genpd/apple/Makefile
+++ b/drivers/pmdomain/apple/Makefile
diff --git a/drivers/genpd/apple/pmgr-pwrstate.c b/drivers/pmdomain/apple/pmgr-pwrstate.c
index d62a776c89a1..d62a776c89a1 100644
--- a/drivers/genpd/apple/pmgr-pwrstate.c
+++ b/drivers/pmdomain/apple/pmgr-pwrstate.c
diff --git a/drivers/genpd/bcm/Makefile b/drivers/pmdomain/bcm/Makefile
index 6bfbe4e4db13..6bfbe4e4db13 100644
--- a/drivers/genpd/bcm/Makefile
+++ b/drivers/pmdomain/bcm/Makefile
diff --git a/drivers/genpd/bcm/bcm-pmb.c b/drivers/pmdomain/bcm/bcm-pmb.c
index a72ba26ecf9d..a72ba26ecf9d 100644
--- a/drivers/genpd/bcm/bcm-pmb.c
+++ b/drivers/pmdomain/bcm/bcm-pmb.c
diff --git a/drivers/genpd/bcm/bcm2835-power.c b/drivers/pmdomain/bcm/bcm2835-power.c
index 1a179d4e011c..1a179d4e011c 100644
--- a/drivers/genpd/bcm/bcm2835-power.c
+++ b/drivers/pmdomain/bcm/bcm2835-power.c
diff --git a/drivers/genpd/bcm/bcm63xx-power.c b/drivers/pmdomain/bcm/bcm63xx-power.c
index 98b0c2430dbc..98b0c2430dbc 100644
--- a/drivers/genpd/bcm/bcm63xx-power.c
+++ b/drivers/pmdomain/bcm/bcm63xx-power.c
diff --git a/drivers/genpd/bcm/raspberrypi-power.c b/drivers/pmdomain/bcm/raspberrypi-power.c
index 06196ebfe03b..06196ebfe03b 100644
--- a/drivers/genpd/bcm/raspberrypi-power.c
+++ b/drivers/pmdomain/bcm/raspberrypi-power.c
diff --git a/drivers/genpd/imx/Makefile b/drivers/pmdomain/imx/Makefile
index 52d2629014a7..52d2629014a7 100644
--- a/drivers/genpd/imx/Makefile
+++ b/drivers/pmdomain/imx/Makefile
diff --git a/drivers/genpd/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
index 90a8b2c0676f..90a8b2c0676f 100644
--- a/drivers/genpd/imx/gpc.c
+++ b/drivers/pmdomain/imx/gpc.c
diff --git a/drivers/genpd/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index fbd3d92f8cd8..fbd3d92f8cd8 100644
--- a/drivers/genpd/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
diff --git a/drivers/genpd/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
index cc5ef6e2f0a8..cc5ef6e2f0a8 100644
--- a/drivers/genpd/imx/imx8m-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
diff --git a/drivers/genpd/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
index c6ac32c1a8c1..c6ac32c1a8c1 100644
--- a/drivers/genpd/imx/imx8mp-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
diff --git a/drivers/genpd/imx/imx93-blk-ctrl.c b/drivers/pmdomain/imx/imx93-blk-ctrl.c
index 40bd90f8b977..40bd90f8b977 100644
--- a/drivers/genpd/imx/imx93-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx93-blk-ctrl.c
diff --git a/drivers/genpd/imx/imx93-pd.c b/drivers/pmdomain/imx/imx93-pd.c
index b9e60d136875..b9e60d136875 100644
--- a/drivers/genpd/imx/imx93-pd.c
+++ b/drivers/pmdomain/imx/imx93-pd.c
diff --git a/drivers/genpd/imx/scu-pd.c b/drivers/pmdomain/imx/scu-pd.c
index 2f693b67ddb4..2f693b67ddb4 100644
--- a/drivers/genpd/imx/scu-pd.c
+++ b/drivers/pmdomain/imx/scu-pd.c
diff --git a/drivers/genpd/mediatek/Makefile b/drivers/pmdomain/mediatek/Makefile
index 8cde09e654b3..8cde09e654b3 100644
--- a/drivers/genpd/mediatek/Makefile
+++ b/drivers/pmdomain/mediatek/Makefile
diff --git a/drivers/genpd/mediatek/mt6795-pm-domains.h b/drivers/pmdomain/mediatek/mt6795-pm-domains.h
index ef07c9dfdd9b..ef07c9dfdd9b 100644
--- a/drivers/genpd/mediatek/mt6795-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt6795-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8167-pm-domains.h b/drivers/pmdomain/mediatek/mt8167-pm-domains.h
index 4d6c32759606..4d6c32759606 100644
--- a/drivers/genpd/mediatek/mt8167-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8167-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8173-pm-domains.h b/drivers/pmdomain/mediatek/mt8173-pm-domains.h
index 1a5dc63b7357..1a5dc63b7357 100644
--- a/drivers/genpd/mediatek/mt8173-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8173-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8183-pm-domains.h b/drivers/pmdomain/mediatek/mt8183-pm-domains.h
index 99de67fe5de8..99de67fe5de8 100644
--- a/drivers/genpd/mediatek/mt8183-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8183-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8186-pm-domains.h b/drivers/pmdomain/mediatek/mt8186-pm-domains.h
index fce86f79c505..fce86f79c505 100644
--- a/drivers/genpd/mediatek/mt8186-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8186-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8188-pm-domains.h b/drivers/pmdomain/mediatek/mt8188-pm-domains.h
index 0692cb444ed0..0692cb444ed0 100644
--- a/drivers/genpd/mediatek/mt8188-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8188-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8192-pm-domains.h b/drivers/pmdomain/mediatek/mt8192-pm-domains.h
index b97b2051920f..b97b2051920f 100644
--- a/drivers/genpd/mediatek/mt8192-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8192-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8195-pm-domains.h b/drivers/pmdomain/mediatek/mt8195-pm-domains.h
index d7387ea1b9c9..d7387ea1b9c9 100644
--- a/drivers/genpd/mediatek/mt8195-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8195-pm-domains.h
diff --git a/drivers/genpd/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
index ee962804b830..ee962804b830 100644
--- a/drivers/genpd/mediatek/mtk-pm-domains.c
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
diff --git a/drivers/genpd/mediatek/mtk-pm-domains.h b/drivers/pmdomain/mediatek/mtk-pm-domains.h
index 5ec53ee073c4..5ec53ee073c4 100644
--- a/drivers/genpd/mediatek/mtk-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.h
diff --git a/drivers/genpd/mediatek/mtk-scpsys.c b/drivers/pmdomain/mediatek/mtk-scpsys.c
index b374d01fdac7..b374d01fdac7 100644
--- a/drivers/genpd/mediatek/mtk-scpsys.c
+++ b/drivers/pmdomain/mediatek/mtk-scpsys.c
diff --git a/drivers/genpd/qcom/Makefile b/drivers/pmdomain/qcom/Makefile
index 403dfc5af095..403dfc5af095 100644
--- a/drivers/genpd/qcom/Makefile
+++ b/drivers/pmdomain/qcom/Makefile
diff --git a/drivers/genpd/qcom/cpr.c b/drivers/pmdomain/qcom/cpr.c
index 94a3f0977212..94a3f0977212 100644
--- a/drivers/genpd/qcom/cpr.c
+++ b/drivers/pmdomain/qcom/cpr.c
diff --git a/drivers/genpd/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index a87e336d5e33..a87e336d5e33 100644
--- a/drivers/genpd/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
diff --git a/drivers/genpd/qcom/rpmpd.c b/drivers/pmdomain/qcom/rpmpd.c
index 3135dd1dafe0..3135dd1dafe0 100644
--- a/drivers/genpd/qcom/rpmpd.c
+++ b/drivers/pmdomain/qcom/rpmpd.c
diff --git a/drivers/genpd/renesas/Makefile b/drivers/pmdomain/renesas/Makefile
index e306e396fc8c..e306e396fc8c 100644
--- a/drivers/genpd/renesas/Makefile
+++ b/drivers/pmdomain/renesas/Makefile
diff --git a/drivers/genpd/renesas/r8a7742-sysc.c b/drivers/pmdomain/renesas/r8a7742-sysc.c
index 219a675f83f4..219a675f83f4 100644
--- a/drivers/genpd/renesas/r8a7742-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7742-sysc.c
diff --git a/drivers/genpd/renesas/r8a7743-sysc.c b/drivers/pmdomain/renesas/r8a7743-sysc.c
index 4e2c0ab951b3..4e2c0ab951b3 100644
--- a/drivers/genpd/renesas/r8a7743-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7743-sysc.c
diff --git a/drivers/genpd/renesas/r8a7745-sysc.c b/drivers/pmdomain/renesas/r8a7745-sysc.c
index 865821a2f0c6..865821a2f0c6 100644
--- a/drivers/genpd/renesas/r8a7745-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7745-sysc.c
diff --git a/drivers/genpd/renesas/r8a77470-sysc.c b/drivers/pmdomain/renesas/r8a77470-sysc.c
index 1eeb8018df50..1eeb8018df50 100644
--- a/drivers/genpd/renesas/r8a77470-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77470-sysc.c
diff --git a/drivers/genpd/renesas/r8a774a1-sysc.c b/drivers/pmdomain/renesas/r8a774a1-sysc.c
index 38ac2c689ff0..38ac2c689ff0 100644
--- a/drivers/genpd/renesas/r8a774a1-sysc.c
+++ b/drivers/pmdomain/renesas/r8a774a1-sysc.c
diff --git a/drivers/genpd/renesas/r8a774b1-sysc.c b/drivers/pmdomain/renesas/r8a774b1-sysc.c
index 5f97ff26f3f8..5f97ff26f3f8 100644
--- a/drivers/genpd/renesas/r8a774b1-sysc.c
+++ b/drivers/pmdomain/renesas/r8a774b1-sysc.c
diff --git a/drivers/genpd/renesas/r8a774c0-sysc.c b/drivers/pmdomain/renesas/r8a774c0-sysc.c
index c1c216f7d073..c1c216f7d073 100644
--- a/drivers/genpd/renesas/r8a774c0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a774c0-sysc.c
diff --git a/drivers/genpd/renesas/r8a774e1-sysc.c b/drivers/pmdomain/renesas/r8a774e1-sysc.c
index 18449f746455..18449f746455 100644
--- a/drivers/genpd/renesas/r8a774e1-sysc.c
+++ b/drivers/pmdomain/renesas/r8a774e1-sysc.c
diff --git a/drivers/genpd/renesas/r8a7779-sysc.c b/drivers/pmdomain/renesas/r8a7779-sysc.c
index e24a7151d55f..e24a7151d55f 100644
--- a/drivers/genpd/renesas/r8a7779-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7779-sysc.c
diff --git a/drivers/genpd/renesas/r8a7790-sysc.c b/drivers/pmdomain/renesas/r8a7790-sysc.c
index b9afe7f6245b..b9afe7f6245b 100644
--- a/drivers/genpd/renesas/r8a7790-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7790-sysc.c
diff --git a/drivers/genpd/renesas/r8a7791-sysc.c b/drivers/pmdomain/renesas/r8a7791-sysc.c
index f00fa24522a3..f00fa24522a3 100644
--- a/drivers/genpd/renesas/r8a7791-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7791-sysc.c
diff --git a/drivers/genpd/renesas/r8a7792-sysc.c b/drivers/pmdomain/renesas/r8a7792-sysc.c
index 60aae242c43f..60aae242c43f 100644
--- a/drivers/genpd/renesas/r8a7792-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7792-sysc.c
diff --git a/drivers/genpd/renesas/r8a7794-sysc.c b/drivers/pmdomain/renesas/r8a7794-sysc.c
index 72ef4e85458f..72ef4e85458f 100644
--- a/drivers/genpd/renesas/r8a7794-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7794-sysc.c
diff --git a/drivers/genpd/renesas/r8a7795-sysc.c b/drivers/pmdomain/renesas/r8a7795-sysc.c
index cbe1ff0fc583..cbe1ff0fc583 100644
--- a/drivers/genpd/renesas/r8a7795-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7795-sysc.c
diff --git a/drivers/genpd/renesas/r8a7796-sysc.c b/drivers/pmdomain/renesas/r8a7796-sysc.c
index 471bd5b3b6ad..471bd5b3b6ad 100644
--- a/drivers/genpd/renesas/r8a7796-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7796-sysc.c
diff --git a/drivers/genpd/renesas/r8a77965-sysc.c b/drivers/pmdomain/renesas/r8a77965-sysc.c
index ff0b0d116992..ff0b0d116992 100644
--- a/drivers/genpd/renesas/r8a77965-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77965-sysc.c
diff --git a/drivers/genpd/renesas/r8a77970-sysc.c b/drivers/pmdomain/renesas/r8a77970-sysc.c
index 706258250600..706258250600 100644
--- a/drivers/genpd/renesas/r8a77970-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77970-sysc.c
diff --git a/drivers/genpd/renesas/r8a77980-sysc.c b/drivers/pmdomain/renesas/r8a77980-sysc.c
index 39ca84a67daa..39ca84a67daa 100644
--- a/drivers/genpd/renesas/r8a77980-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77980-sysc.c
diff --git a/drivers/genpd/renesas/r8a77990-sysc.c b/drivers/pmdomain/renesas/r8a77990-sysc.c
index 9f92737dc352..9f92737dc352 100644
--- a/drivers/genpd/renesas/r8a77990-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77990-sysc.c
diff --git a/drivers/genpd/renesas/r8a77995-sysc.c b/drivers/pmdomain/renesas/r8a77995-sysc.c
index efcc67e3d76d..efcc67e3d76d 100644
--- a/drivers/genpd/renesas/r8a77995-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77995-sysc.c
diff --git a/drivers/genpd/renesas/r8a779a0-sysc.c b/drivers/pmdomain/renesas/r8a779a0-sysc.c
index 04f1bc322ae7..04f1bc322ae7 100644
--- a/drivers/genpd/renesas/r8a779a0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a779a0-sysc.c
diff --git a/drivers/genpd/renesas/r8a779f0-sysc.c b/drivers/pmdomain/renesas/r8a779f0-sysc.c
index 5602aa6bd7ed..5602aa6bd7ed 100644
--- a/drivers/genpd/renesas/r8a779f0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a779f0-sysc.c
diff --git a/drivers/genpd/renesas/r8a779g0-sysc.c b/drivers/pmdomain/renesas/r8a779g0-sysc.c
index b932eba1b804..b932eba1b804 100644
--- a/drivers/genpd/renesas/r8a779g0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a779g0-sysc.c
diff --git a/drivers/genpd/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
index 9e5e6e077abc..9e5e6e077abc 100644
--- a/drivers/genpd/renesas/rcar-gen4-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
diff --git a/drivers/genpd/renesas/rcar-gen4-sysc.h b/drivers/pmdomain/renesas/rcar-gen4-sysc.h
index 388cfa8f8f9f..388cfa8f8f9f 100644
--- a/drivers/genpd/renesas/rcar-gen4-sysc.h
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.h
diff --git a/drivers/genpd/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
index eed47696e825..eed47696e825 100644
--- a/drivers/genpd/renesas/rcar-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-sysc.c
diff --git a/drivers/genpd/renesas/rcar-sysc.h b/drivers/pmdomain/renesas/rcar-sysc.h
index 266c599a0a9b..266c599a0a9b 100644
--- a/drivers/genpd/renesas/rcar-sysc.h
+++ b/drivers/pmdomain/renesas/rcar-sysc.h
diff --git a/drivers/genpd/renesas/rmobile-sysc.c b/drivers/pmdomain/renesas/rmobile-sysc.c
index 912daadaa10d..912daadaa10d 100644
--- a/drivers/genpd/renesas/rmobile-sysc.c
+++ b/drivers/pmdomain/renesas/rmobile-sysc.c
diff --git a/drivers/genpd/rockchip/Makefile b/drivers/pmdomain/rockchip/Makefile
index 8fb9d88a3492..8fb9d88a3492 100644
--- a/drivers/genpd/rockchip/Makefile
+++ b/drivers/pmdomain/rockchip/Makefile
diff --git a/drivers/genpd/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c
index d5d3ecb38283..d5d3ecb38283 100644
--- a/drivers/genpd/rockchip/pm-domains.c
+++ b/drivers/pmdomain/rockchip/pm-domains.c
diff --git a/drivers/genpd/samsung/Makefile b/drivers/pmdomain/samsung/Makefile
index 397aa5908c1d..397aa5908c1d 100644
--- a/drivers/genpd/samsung/Makefile
+++ b/drivers/pmdomain/samsung/Makefile
diff --git a/drivers/genpd/samsung/exynos-pm-domains.c b/drivers/pmdomain/samsung/exynos-pm-domains.c
index 9b502e8751d1..9b502e8751d1 100644
--- a/drivers/genpd/samsung/exynos-pm-domains.c
+++ b/drivers/pmdomain/samsung/exynos-pm-domains.c
diff --git a/drivers/genpd/st/Makefile b/drivers/pmdomain/st/Makefile
index 8fa5f9855460..8fa5f9855460 100644
--- a/drivers/genpd/st/Makefile
+++ b/drivers/pmdomain/st/Makefile
diff --git a/drivers/genpd/st/ste-ux500-pm-domain.c b/drivers/pmdomain/st/ste-ux500-pm-domain.c
index 3d4f111ed156..3d4f111ed156 100644
--- a/drivers/genpd/st/ste-ux500-pm-domain.c
+++ b/drivers/pmdomain/st/ste-ux500-pm-domain.c
diff --git a/drivers/genpd/starfive/Makefile b/drivers/pmdomain/starfive/Makefile
index 975bba2a29a9..975bba2a29a9 100644
--- a/drivers/genpd/starfive/Makefile
+++ b/drivers/pmdomain/starfive/Makefile
diff --git a/drivers/genpd/starfive/jh71xx-pmu.c b/drivers/pmdomain/starfive/jh71xx-pmu.c
index 7d5f50d71c0d..7d5f50d71c0d 100644
--- a/drivers/genpd/starfive/jh71xx-pmu.c
+++ b/drivers/pmdomain/starfive/jh71xx-pmu.c
diff --git a/drivers/genpd/sunxi/Makefile b/drivers/pmdomain/sunxi/Makefile
index ec1d7a2fb21d..ec1d7a2fb21d 100644
--- a/drivers/genpd/sunxi/Makefile
+++ b/drivers/pmdomain/sunxi/Makefile
diff --git a/drivers/genpd/sunxi/sun20i-ppu.c b/drivers/pmdomain/sunxi/sun20i-ppu.c
index 8700f9dd5f75..8700f9dd5f75 100644
--- a/drivers/genpd/sunxi/sun20i-ppu.c
+++ b/drivers/pmdomain/sunxi/sun20i-ppu.c
diff --git a/drivers/genpd/tegra/Makefile b/drivers/pmdomain/tegra/Makefile
index ec8acfd2c77c..ec8acfd2c77c 100644
--- a/drivers/genpd/tegra/Makefile
+++ b/drivers/pmdomain/tegra/Makefile
diff --git a/drivers/genpd/tegra/powergate-bpmp.c b/drivers/pmdomain/tegra/powergate-bpmp.c
index 179ed895c279..179ed895c279 100644
--- a/drivers/genpd/tegra/powergate-bpmp.c
+++ b/drivers/pmdomain/tegra/powergate-bpmp.c
diff --git a/drivers/genpd/ti/Makefile b/drivers/pmdomain/ti/Makefile
index 69580afbb436..69580afbb436 100644
--- a/drivers/genpd/ti/Makefile
+++ b/drivers/pmdomain/ti/Makefile
diff --git a/drivers/genpd/ti/omap_prm.c b/drivers/pmdomain/ti/omap_prm.c
index c2feae3a634c..c2feae3a634c 100644
--- a/drivers/genpd/ti/omap_prm.c
+++ b/drivers/pmdomain/ti/omap_prm.c
diff --git a/drivers/genpd/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
index 34645104fe45..34645104fe45 100644
--- a/drivers/genpd/ti/ti_sci_pm_domains.c
+++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
diff --git a/drivers/genpd/xilinx/Makefile b/drivers/pmdomain/xilinx/Makefile
index a706ab699cfa..a706ab699cfa 100644
--- a/drivers/genpd/xilinx/Makefile
+++ b/drivers/pmdomain/xilinx/Makefile
diff --git a/drivers/genpd/xilinx/zynqmp-pm-domains.c b/drivers/pmdomain/xilinx/zynqmp-pm-domains.c
index 69d03ad4cf1e..69d03ad4cf1e 100644
--- a/drivers/genpd/xilinx/zynqmp-pm-domains.c
+++ b/drivers/pmdomain/xilinx/zynqmp-pm-domains.c
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 4aa466c945e2..0b69fb7bafd8 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1309,8 +1309,8 @@ static int psy_register_thermal(struct power_supply *psy)
struct thermal_zone_params tzp = {
.no_hwmon = IS_ENABLED(CONFIG_POWER_SUPPLY_HWMON)
};
- psy->tzd = thermal_zone_device_register(psy->desc->name,
- 0, 0, psy, &psy_tzd_ops, &tzp, 0, 0);
+ psy->tzd = thermal_tripless_zone_device_register(psy->desc->name,
+ psy, &psy_tzd_ops, &tzp);
if (IS_ERR(psy->tzd))
return PTR_ERR(psy->tzd);
ret = thermal_zone_device_enable(psy->tzd);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 5c2e6d5eea2a..40a2cc649c79 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -658,8 +658,6 @@ static struct rapl_primitive_info rpi_msr[NR_RAPL_PRIMITIVES] = {
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PL2_CLAMP] = PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
- [PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0,
- RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
[TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
[TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
@@ -1458,7 +1456,7 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
}
}
- if (rapl_read_pl_data(rd, i, PL_ENABLE, false, &val64))
+ if (rapl_read_pl_data(rd, i, PL_LIMIT, false, &val64))
rd->rpl[i].name = NULL;
}
}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 620fab01b710..c4e36650c426 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1378,16 +1378,12 @@ static ssize_t dasd_vendor_show(struct device *dev,
static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
-#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 +\
- /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 +\
- /* vduit */ 32 + 1)
-
static ssize_t
dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
+ char uid_string[DASD_UID_STRLEN];
struct dasd_device *device;
struct dasd_uid uid;
- char uid_string[UID_STRLEN];
char ua_string[3];
device = dasd_device_from_cdev(to_ccwdev(dev));
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 8587e423169e..bd89b032968a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1079,12 +1079,12 @@ static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
create_uid(conf, &uid);
if (strlen(uid.vduit) > 0)
- snprintf(print_uid, sizeof(*print_uid),
+ snprintf(print_uid, DASD_UID_STRLEN,
"%s.%s.%04x.%02x.%s",
uid.vendor, uid.serial, uid.ssid,
uid.real_unit_addr, uid.vduit);
else
- snprintf(print_uid, sizeof(*print_uid),
+ snprintf(print_uid, DASD_UID_STRLEN,
"%s.%s.%04x.%02x",
uid.vendor, uid.serial, uid.ssid,
uid.real_unit_addr);
@@ -1093,8 +1093,8 @@ static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
static int dasd_eckd_check_cabling(struct dasd_device *device,
void *conf_data, __u8 lpm)
{
+ char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
struct dasd_eckd_private *private = device->private;
- char print_path_uid[60], print_device_uid[60];
struct dasd_conf path_conf;
path_conf.data = conf_data;
@@ -1293,9 +1293,9 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
struct dasd_conf_data *conf_data;
+ char print_uid[DASD_UID_STRLEN];
struct dasd_conf path_conf;
unsigned long flags;
- char print_uid[60];
int rc, pos;
opm = 0;
@@ -5855,8 +5855,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
static int dasd_eckd_reload_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
+ char print_uid[DASD_UID_STRLEN];
int rc, old_base;
- char print_uid[60];
struct dasd_uid uid;
unsigned long flags;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 0aa56351da72..8a4dbe9d7741 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -259,6 +259,10 @@ struct dasd_uid {
char vduit[33];
};
+#define DASD_UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 + \
+ /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 + \
+ /* vduit */ 32 + 1)
+
/*
* PPRC Status data
*/
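
The dasd patches above replace the scattered char[60] buffers and the devmap-local UID_STRLEN with one DASD_UID_STRLEN next to struct dasd_uid, and size every snprintf() with it. The macro is simply the sum of each printed field plus its separator and the trailing NUL, which works out to the same 60 bytes the open-coded arrays used. A usage fragment assuming the struct dasd_uid and DASD_UID_STRLEN shown above (my_format_uid is a hypothetical helper):

#include <linux/kernel.h>

/* vendor 3 + '.' 1 + serial 14 + '.' 1 + SSID 4 + '.' 1
 * + unit addr 2 + '.' 1 + vduit 32 + '\0' 1  =  60 bytes */
static void my_format_uid(char print_uid[DASD_UID_STRLEN],
			  const struct dasd_uid *uid)
{
	snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x.%s",
		 uid->vendor, uid->serial, uid->ssid,
		 uid->real_unit_addr, uid->vduit);
}
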
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 3f062e4013ab..013a9a334972 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1451,7 +1451,7 @@ retry_next:
#endif
break;
}
- scsi_rescan_device(&device->sdev_gendev);
+ scsi_rescan_device(device);
break;
default:
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index e51e92f932fa..93c68931a593 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -27,7 +27,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.55"
+#define DRV_VERSION "1.6.0.56"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -236,6 +236,7 @@ struct fnic {
unsigned int wq_count;
unsigned int cq_count;
+ struct mutex sgreset_mutex;
struct dentry *fnic_stats_debugfs_host;
struct dentry *fnic_stats_debugfs_file;
struct dentry *fnic_reset_debugfs_file;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index be89ce96df46..9761b2c9db48 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2168,39 +2168,6 @@ clean_pending_aborts_end:
}
/*
- * fnic_scsi_host_start_tag
- * Allocates tagid from host's tag list
- **/
-static inline int
-fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
-{
- struct request *rq = scsi_cmd_to_rq(sc);
- struct request_queue *q = rq->q;
- struct request *dummy;
-
- dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(dummy))
- return SCSI_NO_TAG;
-
- rq->tag = dummy->tag;
- sc->host_scribble = (unsigned char *)dummy;
-
- return dummy->tag;
-}
-
-/*
- * fnic_scsi_host_end_tag
- * frees tag allocated by fnic_scsi_host_start_tag.
- **/
-static inline void
-fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
-{
- struct request *dummy = (struct request *)sc->host_scribble;
-
- blk_mq_free_request(dummy);
-}
-
-/*
* SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
* fail to get aborted. It calls driver's eh_device_reset with a SCSI command
* on the LUN.
@@ -2222,7 +2189,6 @@ int fnic_device_reset(struct scsi_cmnd *sc)
struct reset_stats *reset_stats;
int tag = rq->tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
- int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
bool new_sc = 0;
/* Wait for rport to unblock */
@@ -2252,17 +2218,17 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
- /* Allocate tag if not present */
if (unlikely(tag < 0)) {
/*
- * Really should fix the midlayer to pass in a proper
- * request for ioctls...
+ * For device reset issued through sg3utils, we let
+ * only one LUN_RESET to go through and use a special
+ * tag equal to max_tag_id so that we don't have to allocate
+ * or free it. It won't interact with tags
+ * allocated by mid layer.
*/
- tag = fnic_scsi_host_start_tag(fnic, sc);
- if (unlikely(tag == SCSI_NO_TAG))
- goto fnic_device_reset_end;
- tag_gen_flag = 1;
+ mutex_lock(&fnic->sgreset_mutex);
+ tag = fnic->fnic_max_tag_id;
new_sc = 1;
}
io_lock = fnic_io_lock_hash(fnic, sc);
@@ -2434,9 +2400,8 @@ fnic_device_reset_end:
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
- /* free tag if it is allocated */
- if (unlikely(tag_gen_flag))
- fnic_scsi_host_end_tag(fnic, sc);
+ if (new_sc)
+ mutex_unlock(&fnic->sgreset_mutex);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from device reset %s\n",
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 0c103f4523b8..9047cfcd1072 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -386,37 +386,7 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev)
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
-static void sas_wait_eh(struct domain_device *dev)
-{
- struct sas_ha_struct *ha = dev->port->ha;
- DEFINE_WAIT(wait);
-
- if (dev_is_sata(dev)) {
- ata_port_wait_eh(dev->sata_dev.ap);
- return;
- }
- retry:
- spin_lock_irq(&ha->lock);
-
- while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
- prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&ha->lock);
- schedule();
- spin_lock_irq(&ha->lock);
- }
- finish_wait(&ha->eh_wait_q, &wait);
-
- spin_unlock_irq(&ha->lock);
-
- /* make sure SCSI EH is complete */
- if (scsi_host_in_recovery(ha->shost)) {
- msleep(10);
- goto retry;
- }
-}
-
-static int sas_queue_reset(struct domain_device *dev, int reset_type,
- u64 lun, int wait)
+static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun)
{
struct sas_ha_struct *ha = dev->port->ha;
int scheduled = 0, tries = 100;
@@ -424,8 +394,6 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
/* ata: promote lun reset to bus reset */
if (dev_is_sata(dev)) {
sas_ata_schedule_reset(dev);
- if (wait)
- sas_ata_wait_eh(dev);
return SUCCESS;
}
@@ -443,9 +411,6 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
}
spin_unlock_irq(&ha->lock);
- if (wait)
- sas_wait_eh(dev);
-
if (scheduled)
return SUCCESS;
}
@@ -498,7 +463,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
struct sas_internal *i = to_sas_internal(host->transportt);
if (current != host->ehandler)
- return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
+ return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun);
int_to_scsilun(cmd->device->lun, &lun);
@@ -521,7 +486,7 @@ int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
struct sas_internal *i = to_sas_internal(host->transportt);
if (current != host->ehandler)
- return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
+ return sas_queue_reset(dev, SAS_DEV_RESET, 0);
if (!i->dft->lldd_I_T_nexus_reset)
return FAILED;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index ed3923f8db4f..6de35b32223c 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -199,7 +199,7 @@
*
*****************************************************************************/
-typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
+typedef struct _MPI2_SYSTEM_INTERFACE_REGS {
U32 Doorbell; /*0x00 */
U32 WriteSequence; /*0x04 */
U32 HostDiagnostic; /*0x08 */
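
Dropping volatile from MPI2_SYSTEM_INTERFACE_REGS here, and from the __iomem parameters of the read helpers below, is a cleanup rather than a behavioural change: these registers are only ever accessed through readl()/writel(), which already provide the required ordering and access-once semantics, so the qualifier was redundant.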
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 53f5492579cb..61a32bf00747 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -138,6 +138,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
+static u32
+_base_readl_ext_retry(const void __iomem *addr);
+
/**
* mpt3sas_base_check_cmd_timeout - Function
* to check timeout and command termination due
@@ -201,7 +204,7 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
* while reading the system interface register.
*/
static inline u32
-_base_readl_aero(const volatile void __iomem *addr)
+_base_readl_aero(const void __iomem *addr)
{
u32 i = 0, ret_val;
@@ -213,8 +216,22 @@ _base_readl_aero(const volatile void __iomem *addr)
return ret_val;
}
+static u32
+_base_readl_ext_retry(const void __iomem *addr)
+{
+ u32 i, ret_val;
+
+ for (i = 0 ; i < 30 ; i++) {
+ ret_val = readl(addr);
+ if (ret_val == 0)
+ continue;
+ }
+
+ return ret_val;
+}
+
static inline u32
-_base_readl(const volatile void __iomem *addr)
+_base_readl(const void __iomem *addr)
{
return readl(addr);
}
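
_base_readl_ext_retry() above is the extended-retry variant now used for Doorbell and HostDiagnostic reads on controllers where a read can transiently return 0; it simply re-reads the register up to 30 times. A small stand-alone sketch of the same idea (bounded retry-while-zero, with an explicit early exit added for clarity):

#include <stdint.h>

/* Sketch only: bounded "retry while the register reads back zero". The real
 * helper keeps the __iomem/readl() types and the retry count of 30. */
static uint32_t read_reg_retry(const volatile uint32_t *reg)
{
	uint32_t val = 0;
	int i;

	for (i = 0; i < 30; i++) {
		val = *reg;		/* stands in for readl(addr) */
		if (val != 0)
			break;		/* plausible value read, stop retrying */
	}
	return val;
}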
@@ -940,7 +957,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
dump_stack();
- doorbell = ioc->base_readl(&ioc->chip->Doorbell);
+ doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt3sas_print_fault_code(ioc, doorbell &
MPI2_DOORBELL_DATA_MASK);
@@ -6686,7 +6703,7 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
{
u32 s, sc;
- s = ioc->base_readl(&ioc->chip->Doorbell);
+ s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
sc = s & MPI2_IOC_STATE_MASK;
return cooked ? sc : s;
}
@@ -6831,7 +6848,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
__func__, count, timeout));
return 0;
} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
- doorbell = ioc->base_readl(&ioc->chip->Doorbell);
+ doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
if ((doorbell & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_FAULT) {
mpt3sas_print_fault_code(ioc, doorbell);
@@ -6871,7 +6888,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
count = 0;
cntdn = 1000 * timeout;
do {
- doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
+ doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
dhsprintk(ioc,
ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
@@ -7019,7 +7036,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
__le32 *mfp;
/* make sure doorbell is not in use */
- if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
return -EFAULT;
}
@@ -7068,7 +7085,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* read the first two 16-bits, it gives the total length of the reply */
- reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+ reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -7076,7 +7093,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
__LINE__);
return -EFAULT;
}
- reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+ reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
@@ -7087,10 +7104,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
return -EFAULT;
}
if (i >= reply_bytes/2) /* overflow case */
- ioc->base_readl(&ioc->chip->Doorbell);
+ ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
else
reply[i] = le16_to_cpu(
- ioc->base_readl(&ioc->chip->Doorbell)
+ ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
@@ -7949,7 +7966,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
goto out;
}
- host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+ host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
drsprintk(ioc,
ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
count, host_diagnostic));
@@ -7969,7 +7986,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
for (count = 0; count < (300000000 /
MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
- host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+ host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
if (host_diagnostic == 0xFFFFFFFF) {
ioc_info(ioc,
@@ -8359,10 +8376,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->rdpq_array_enable_assigned = 0;
ioc->use_32bit_dma = false;
ioc->dma_mask = 64;
- if (ioc->is_aero_ioc)
+ if (ioc->is_aero_ioc) {
ioc->base_readl = &_base_readl_aero;
- else
+ ioc->base_readl_ext_retry = &_base_readl_ext_retry;
+ } else {
ioc->base_readl = &_base_readl;
+ ioc->base_readl_ext_retry = &_base_readl;
+ }
r = mpt3sas_base_map_resources(ioc);
if (r)
goto out_free_resources;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 05364aa15ecd..1be0850ca17a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -994,7 +994,7 @@ typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 funcdep);
typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
-typedef u32 (*BASE_READ_REG) (const volatile void __iomem *addr);
+typedef u32 (*BASE_READ_REG) (const void __iomem *addr);
/*
* To get high iops reply queue's msix index when high iops mode is enabled
* else get the msix index of general reply queues.
@@ -1618,6 +1618,7 @@ struct MPT3SAS_ADAPTER {
u8 diag_trigger_active;
u8 atomic_desc_capable;
BASE_READ_REG base_readl;
+ BASE_READ_REG base_readl_ext_retry;
struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 97f9d2fa6429..d9d366ec17dc 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -1500,7 +1500,7 @@ static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
if (sdev) {
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
}
}
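
This mvumi hunk (like the rescan conversion at the top of this section) reflects the tree-wide change of scsi_rescan_device() to take the struct scsi_device itself rather than its embedded struct device; call sites simply drop the &...->sdev_gendev indirection. A sketch of a converted caller:

/* Sketch of a call site after the signature change; error handling elided. */
static void rescan_one_target(struct Scsi_Host *shost, int id)
{
	struct scsi_device *sdev;

	sdev = scsi_device_lookup(shost, 0, id, 0);
	if (!sdev)
		return;

	scsi_rescan_device(sdev);	/* was: scsi_rescan_device(&sdev->sdev_gendev) */
	scsi_device_put(sdev);
}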
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index c5c0bbdafc4e..1619cc33034f 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -548,7 +548,6 @@ extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
void qedf_stag_change_work(struct work_struct *work);
void qedf_ctx_soft_reset(struct fc_lport *lport);
-extern void qedf_board_disable_work(struct work_struct *work);
extern void qedf_schedule_hw_err_handler(void *dev,
enum qed_hw_err_type err_type);
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 0e316cc24b19..772218445a56 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -67,8 +67,6 @@ void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
-int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
-void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
void qedi_clearsq(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn,
struct iscsi_task *task);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b00222459607..44449c70a375 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -3093,8 +3093,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d7e8454304ce..691ef827a5ab 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,13 +12,12 @@
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0199 | |
* | Mailbox commands | 0x1206 | 0x11a5-0x11ff |
- * | Device Discovery | 0x2134 | 0x210e-0x2115 |
- * | | | 0x211c-0x2128 |
- * | | | 0x212c-0x2134 |
+ * | Device Discovery | 0x2134 | 0x2112-0x2115 |
+ * | | | 0x2127-0x2128 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
- * | | | 0x302d,0x3033 |
+ * | | | 0x302e,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 70482b55d240..54f0a412226f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -368,6 +368,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
#define ql_dbg_edif 0x00000400 /* edif and purex debug */
+#define ql_dbg_unsol 0x00000100 /* Unsolicited path debug */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 336b8c665cb4..deb642607deb 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -346,6 +346,12 @@ struct name_list_extended {
u8 sent;
};
+struct qla_nvme_fc_rjt {
+ struct fcnvme_ls_rjt *c;
+ dma_addr_t cdma;
+ u16 size;
+};
+
struct els_reject {
struct fc_els_ls_rjt *c;
dma_addr_t cdma;
@@ -503,6 +509,20 @@ struct ct_arg {
port_id_t id;
};
+struct qla_nvme_lsrjt_pt_arg {
+ struct fc_port *fcport;
+ u8 opcode;
+ u8 vp_idx;
+ u8 reason;
+ u8 explanation;
+ __le16 nport_handle;
+ u16 control_flags;
+ __le16 ox_id;
+ __le32 xchg_address;
+ u32 tx_byte_count, rx_byte_count;
+ dma_addr_t tx_addr, rx_addr;
+};
+
/*
* SRB extensions.
*/
@@ -611,13 +631,16 @@ struct srb_iocb {
void *desc;
/* These are only used with ls4 requests */
- int cmd_len;
- int rsp_len;
+ __le32 cmd_len;
+ __le32 rsp_len;
dma_addr_t cmd_dma;
dma_addr_t rsp_dma;
enum nvmefc_fcp_datadir dir;
uint32_t dl;
uint32_t timeout_sec;
+ __le32 exchange_address;
+ __le16 nport_handle;
+ __le16 ox_id;
struct list_head entry;
} nvme;
struct {
@@ -707,6 +730,10 @@ typedef struct srb {
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
+ unsigned int abort:1;
+ unsigned int aborted:1;
+ unsigned int completed:1;
+ unsigned int unsol_rsp:1;
uint32_t handle;
uint16_t flags;
@@ -2542,6 +2569,7 @@ enum rscn_addr_format {
typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
+ struct list_head unsol_ctx_head;
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
@@ -3742,6 +3770,16 @@ struct qla_fw_resources {
u16 pad;
};
+struct qla_fw_res {
+ u16 iocb_total;
+ u16 iocb_limit;
+ atomic_t iocb_used;
+
+ u16 exch_total;
+ u16 exch_limit;
+ atomic_t exch_used;
+};
+
#define QLA_IOCB_PCT_LIMIT 95
struct qla_buf_pool {
@@ -3787,6 +3825,12 @@ struct qla_qpair {
uint16_t id; /* qp number used with FW */
uint16_t vp_idx; /* vport ID */
+
+ uint16_t dsd_inuse;
+ uint16_t dsd_avail;
+ struct list_head dsd_list;
+#define NUM_DSD_CHAIN 4096
+
mempool_t *srb_mempool;
struct pci_dev *pdev;
@@ -4384,7 +4428,6 @@ struct qla_hw_data {
uint8_t aen_mbx_count;
atomic_t num_pend_mbx_stage1;
atomic_t num_pend_mbx_stage2;
- atomic_t num_pend_mbx_stage3;
uint16_t frame_payload_size;
uint32_t login_retry_count;
@@ -4714,11 +4757,6 @@ struct qla_hw_data {
struct fw_blob *hablob;
struct qla82xx_legacy_intr_set nx_legacy_intr;
- uint16_t gbl_dsd_inuse;
- uint16_t gbl_dsd_avail;
- struct list_head gbl_dsd_list;
-#define NUM_DSD_CHAIN 4096
-
uint8_t fw_type;
uint32_t file_prd_off; /* File firmware product offset */
@@ -4800,6 +4838,8 @@ struct qla_hw_data {
struct els_reject elsrej;
u8 edif_post_stop_cnt_down;
struct qla_vp_map *vp_map;
+ struct qla_nvme_fc_rjt lsrjt;
+ struct qla_fw_res fwres ____cacheline_aligned;
};
#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
@@ -4832,6 +4872,7 @@ struct active_regions {
* is variable) starting at "iocb".
*/
struct purex_item {
+ void *purls_context;
struct list_head list;
struct scsi_qla_host *vha;
void (*process_item)(struct scsi_qla_host *vha,
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 1925cc6897b6..f060e593685d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -276,6 +276,16 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
exch_used, ha->base_qpair->fwres.exch_limit);
+
+ if (ql2xenforce_iocb_limit == 2) {
+ iocbs_used = atomic_read(&ha->fwres.iocb_used);
+ exch_used = atomic_read(&ha->fwres.exch_used);
+ seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n",
+ iocbs_used, ha->fwres.iocb_limit);
+
+ seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n",
+ exch_used, ha->fwres.exch_limit);
+ }
}
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 816c0b9ecd0e..09cb9413670a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -603,7 +603,11 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
-void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp);
+void qla24xx_queue_purex_item(scsi_qla_host_t *, struct purex_item *,
+ void (*process_item)(struct scsi_qla_host *,
+ struct purex_item *));
+void __qla_consume_iocb(struct scsi_qla_host *, void **, struct rsp_que **);
+void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -666,9 +670,11 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
extern int qla2x00_mailbox_passthru(struct bsg_job *bsg_job);
-int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
- struct rsp_que **rsp, u8 *buf, u32 buf_len);
-
+int qla2x00_sys_ld_info(struct bsg_job *bsg_job);
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *, void **,
+ struct rsp_que **, u8 *, u32);
+struct purex_item *qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp, bool is_purls, bool byte_order);
int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in,
uint16_t *mbx_out);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index df623de89255..a314cfc5b263 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2223,6 +2223,8 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
rval = QLA_FUNCTION_FAILED;
}
}
+ if (tm_iocb->u.tmf.data)
+ rval = tm_iocb->u.tmf.data;
done_free_sp:
/* ref: INIT */
@@ -4203,7 +4205,7 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
u8 i;
struct qla_hw_data *ha = vha->hw;
- __qla_adjust_iocb_limit(ha->base_qpair);
+ __qla_adjust_iocb_limit(ha->base_qpair);
ha->base_qpair->fwres.iocbs_used = 0;
ha->base_qpair->fwres.exch_used = 0;
@@ -4214,6 +4216,14 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
ha->queue_pair_map[i]->fwres.exch_used = 0;
}
}
+
+ ha->fwres.iocb_total = ha->orig_fw_iocb_count;
+ ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ ha->fwres.exch_total = ha->orig_fw_xcb_count;
+ ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+ atomic_set(&ha->fwres.iocb_used, 0);
+ atomic_set(&ha->fwres.exch_used, 0);
}
void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
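
With ql2xenforce_iocb_limit mode 2 (see the qla_os.c hunk near the end of this section), usage is now tracked per adapter in the new ha->fwres atomics in addition to the per-queue-pair counters; both limits are 95% (QLA_IOCB_PCT_LIMIT) of the firmware-reported totals, as computed in qla_init_iocb_limit() above. A worked check of that arithmetic, with an illustrative firmware count:

/* Sketch: adapter-wide limit at 95% of the FW total, as in qla_init_iocb_limit(). */
#define PCT_LIMIT 95	/* mirrors QLA_IOCB_PCT_LIMIT */

static unsigned short pct_limit(unsigned short total)
{
	return (total * PCT_LIMIT) / 100;	/* e.g. 2048 IOCBs -> limit 1945 */
}

Crossing either adapter-wide limit makes qla_get_fw_resources() (qla_inline.h hunk below) return -ENOSPC, so the command is backed off and retried rather than overrunning firmware resources.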
@@ -5554,6 +5564,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
+ INIT_LIST_HEAD(&fcport->unsol_ctx_head);
INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
@@ -5596,7 +5607,7 @@ static void qla_get_login_template(scsi_qla_host_t *vha)
__be32 *q;
memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct fc_els_csp), ha->init_cb_size);
+ sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
ha->init_cb, sz);
if (rval != QLA_SUCCESS) {
@@ -7390,14 +7401,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
}
/* purge MBox commands */
- if (atomic_read(&ha->num_pend_mbx_stage3)) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
complete(&ha->mbx_intr_comp);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
i = 0;
- while (atomic_read(&ha->num_pend_mbx_stage3) ||
- atomic_read(&ha->num_pend_mbx_stage2) ||
+ while (atomic_read(&ha->num_pend_mbx_stage2) ||
atomic_read(&ha->num_pend_mbx_stage1)) {
msleep(20);
i++;
@@ -9643,6 +9655,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->vp_idx = vp_idx;
qpair->fw_started = ha->flags.fw_started;
INIT_LIST_HEAD(&qpair->hints_list);
+ INIT_LIST_HEAD(&qpair->dsd_list);
qpair->chip_reset = ha->base_qpair->chip_reset;
qpair->enable_class_2 = ha->base_qpair->enable_class_2;
qpair->enable_explicit_conf =
@@ -9771,6 +9784,19 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
if (ret != QLA_SUCCESS)
goto fail;
+ if (!list_empty(&qpair->dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &qpair->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ }
+
mutex_lock(&ha->mq_lock);
ha->queue_pair_map[qpair->id] = NULL;
clear_bit(qpair->id, ha->qpair_qid_map);
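
The DSD chain bookkeeping moves from the global ha->gbl_dsd_* fields to each queue pair (dsd_list/dsd_inuse/dsd_avail), so Type 6 commands on different queue pairs no longer contend on one shared list, and qla2xxx_delete_qpair() above now drains the per-qpair pool on teardown. The completion side, shown later in the qla_os.c hunks, splices a command's descriptors straight back onto its own queue pair, roughly:

/* Sketch: return a command's DSD descriptors to its queue pair, mirroring the
 * qla_os.c hunks below; serialization against the submit path is left to the
 * caller in this sketch. */
static void dsd_return_to_qpair(struct qla_qpair *qpair, struct ct6_dsd *ctx)
{
	list_splice(&ctx->dsd_list, &qpair->dsd_list);
	qpair->dsd_inuse -= ctx->dsd_use_cnt;
	qpair->dsd_avail += ctx->dsd_use_cnt;
}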
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 0167e85ba058..0556969f6dc1 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -386,6 +386,7 @@ enum {
RESOURCE_IOCB = BIT_0,
RESOURCE_EXCH = BIT_1, /* exchange */
RESOURCE_FORCE = BIT_2,
+ RESOURCE_HA = BIT_3,
};
static inline int
@@ -393,7 +394,7 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
u16 iocbs_used, i;
u16 exch_used;
- struct qla_hw_data *ha = qp->vha->hw;
+ struct qla_hw_data *ha = qp->hw;
if (!ql2xenforce_iocb_limit) {
iores->res_type = RESOURCE_NONE;
@@ -428,15 +429,69 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
return -ENOSPC;
}
}
+
+ if (ql2xenforce_iocb_limit == 2) {
+ if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
+ ha->fwres.iocb_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+
+ if (iores->res_type & RESOURCE_EXCH) {
+ if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
+ ha->fwres.exch_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+ }
+
force:
qp->fwres.iocbs_used += iores->iocb_cnt;
qp->fwres.exch_used += iores->exch_cnt;
+ if (ql2xenforce_iocb_limit == 2) {
+ atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
+ atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
+ iores->res_type |= RESOURCE_HA;
+ }
return 0;
}
+/*
+ * decrement to zero. This routine will not decrement below zero
+ * @v: pointer of type atomic_t
+ * @amount: amount to decrement from v
+ */
+static void qla_atomic_dtz(atomic_t *v, int amount)
+{
+ int c, old, dec;
+
+ c = atomic_read(v);
+ for (;;) {
+ dec = c - amount;
+ if (unlikely(dec < 0))
+ dec = 0;
+
+ old = atomic_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+}
+
static inline void
qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
+ struct qla_hw_data *ha = qp->hw;
+
+ if (iores->res_type & RESOURCE_HA) {
+ if (iores->res_type & RESOURCE_IOCB)
+ qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);
+
+ if (iores->res_type & RESOURCE_EXCH)
+ qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
+ }
+
if (iores->res_type & RESOURCE_IOCB) {
if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
qp->fwres.iocbs_used -= iores->iocb_cnt;
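
qla_atomic_dtz() above is a saturating decrement: it loops on atomic_cmpxchg() so the shared usage counter never drops below zero even when several completions race. The same idea as a stand-alone C11 sketch, for reference:

#include <stdatomic.h>

/* Sketch: decrement-to-zero using a compare-and-swap loop. */
static void dec_to_zero(atomic_int *v, int amount)
{
	int cur = atomic_load(v);

	for (;;) {
		int next = cur - amount;

		if (next < 0)
			next = 0;	/* clamp instead of going negative */
		/* On failure, cur is reloaded with the value another CPU stored. */
		if (atomic_compare_exchange_weak(v, &cur, next))
			break;
	}
}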
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 1ee9b7d5fc15..df90169f8244 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,6 +11,7 @@
#include <scsi/scsi_tcq.h>
+static int qla_start_scsi_type6(srb_t *sp);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
* @sp: SCSI command
@@ -590,8 +591,6 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint16_t tot_dsds)
{
struct dsd64 *cur_dsd = NULL, *next_dsd;
- scsi_qla_host_t *vha;
- struct qla_hw_data *ha;
struct scsi_cmnd *cmd;
struct scatterlist *cur_seg;
uint8_t avail_dsds;
@@ -613,9 +612,6 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
return 0;
}
- vha = sp->vha;
- ha = vha->hw;
-
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
@@ -636,14 +632,13 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
tot_dsds -= avail_dsds;
dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
- dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
- struct dsd_dma, list);
+ dsd_ptr = list_first_entry(&qpair->dsd_list, struct dsd_dma, list);
next_dsd = dsd_ptr->dsd_addr;
list_del(&dsd_ptr->list);
- ha->gbl_dsd_avail--;
+ qpair->dsd_avail--;
list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
ctx->dsd_use_cnt++;
- ha->gbl_dsd_inuse++;
+ qpair->dsd_inuse++;
if (first_iocb) {
first_iocb = 0;
@@ -1722,6 +1717,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
if (cmd->cmd_len <= 16)
return qla24xx_start_scsi(sp);
+ else
+ return qla_start_scsi_type6(sp);
}
/* Setup device pointers. */
@@ -2101,6 +2098,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
if (cmd->cmd_len <= 16)
return qla2xxx_start_scsi_mq(sp);
+ else
+ return qla_start_scsi_type6(sp);
}
spin_lock_irqsave(&qpair->qp_lock, flags);
@@ -3368,6 +3367,7 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ struct qla_qpair *qpair = sp->qpair;
/* Setup device pointers. */
reg = &ha->iobase->isp82;
@@ -3416,18 +3416,18 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t i;
more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
- if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
ql_dbg(ql_dbg_io, vha, 0x300d,
"Num of DSD list %d is than %d for cmd=%p.\n",
- more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN,
cmd);
goto queuing_error;
}
- if (more_dsd_lists <= ha->gbl_dsd_avail)
+ if (more_dsd_lists <= qpair->dsd_avail)
goto sufficient_dsds;
else
- more_dsd_lists -= ha->gbl_dsd_avail;
+ more_dsd_lists -= qpair->dsd_avail;
for (i = 0; i < more_dsd_lists; i++) {
dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
@@ -3447,8 +3447,8 @@ qla82xx_start_scsi(srb_t *sp)
"for cmd=%p.\n", cmd);
goto queuing_error;
}
- list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
- ha->gbl_dsd_avail++;
+ list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+ qpair->dsd_avail++;
}
sufficient_dsds:
@@ -3767,21 +3767,28 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1;
- cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
-
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
- cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ if (sp->unsol_rsp) {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT);
+ cmd_pkt->nport_handle = nvme->u.nvme.nport_handle;
+ cmd_pkt->exchange_address = nvme->u.nvme.exchange_address;
+ } else {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
+ cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+ put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
+ }
+
cmd_pkt->tx_dseg_count = cpu_to_le16(1);
- cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
- cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
+ cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
+ cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
-
- cmd_pkt->rx_dseg_count = cpu_to_le16(1);
- cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
- cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
- put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
}
static void
@@ -4198,3 +4205,267 @@ queuing_error:
return rval;
}
+
+/**
+ * qla_start_scsi_type6() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla_start_scsi_type6(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t handle;
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = sp->qpair;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+ __be32 *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Setup qpair pointers */
+ req = qpair->req;
+ rsp = qpair->rsp;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else {
+ nseg = 0;
+ }
+
+ tot_dsds = nseg;
+
+ /* even though the driver only needs 1 T6 IOCB, FW still converts DSDs to Continuation IOCBs */
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
+ sp->iores.iocb_cnt = req_cnt;
+
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ goto queuing_error;
+
+ more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x3028,
+ "Num of DSD list %d is than %d for cmd=%p.\n",
+ more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN, cmd);
+ goto queuing_error;
+ }
+
+ if (more_dsd_lists <= qpair->dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= qpair->dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x3029,
+ "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ INIT_LIST_HEAD(&dsd_ptr->list);
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x302a,
+ "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+ qpair->dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ if (req->cnt < (req_cnt + 2)) {
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = (uint16_t)rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length - (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ ctx = &sp->u.scmd.ct6_ctx;
+
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3031,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if (cmd->cmd_len % 4 ||
+ cmd->cmd_len > QLA_CDB_BUF_SIZE) {
+ /*
+ * SCSI command bigger than 16 bytes must be
+ * multiple of 4 or too big.
+ */
+ ql_log(ql_log_warn, vha, 0x3033,
+ "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
+ cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = make_handle(req->id, handle);
+
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number */
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds);
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ put_unaligned_le64(ctx->fcp_cmnd_dma,
+ &cmd_pkt->fcp_cmnd_dseg_address);
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+
+ wmb();
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ sp->qpair->cmd_cnt++;
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ qla_put_fw_resources(sp->qpair, &sp->iores);
+
+ if (sp->u.scmd.crc_ctx) {
+ mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
+ sp->u.scmd.crc_ctx = NULL;
+ }
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
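
qla_start_scsi_type6() gives the greater-than-16-byte-CDB case (reached from the DIF entry points above when scsi_get_prot_op() is SCSI_PROT_NORMAL) a Command Type 6 IOCB: the CDB lives in an external FCP_CMND buffer from fcp_cmnd_dma_pool, with the 4-byte FCP_DL appended immediately after it, which is where the 12 + cmd_len + 4 length comes from. A worked check of that sizing, under the constraints in the hunk (CDB length a multiple of 4, at most QLA_CDB_BUF_SIZE):

/* Sketch: FCP_CMND IU length used by the Type 6 path (12-byte fixed header of
 * LUN/CRN/task fields, CDB padded to at least 16 bytes, then 4-byte FCP_DL). */
static unsigned int fcp_cmnd_iu_len(unsigned int cdb_len)
{
	if (cdb_len < 16)
		cdb_len = 16;		/* short CDBs use the embedded 16-byte slot */
	return 12 + cdb_len + 4;	/* e.g. 32-byte CDB -> 48-byte IU */
}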
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1f42a413b598..e98788191897 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -56,6 +56,22 @@ const char *const port_state_str[] = {
[FCS_ONLINE] = "ONLINE"
};
+#define SFP_DISABLE_LASER_INITIATED 0x15 /* Sub code of 8070 AEN */
+#define SFP_ENABLE_LASER_INITIATED 0x16 /* Sub code of 8070 AEN */
+
+static inline void display_Laser_info(scsi_qla_host_t *vha,
+ u16 mb1, u16 mb2, u16 mb3) {
+
+ if (mb1 == SFP_DISABLE_LASER_INITIATED)
+ ql_log(ql_log_warn, vha, 0xf0a2,
+ "SFP temperature (%d C) reached/exceeded the threshold (%d C). Laser is disabled.\n",
+ mb3, mb2);
+ if (mb1 == SFP_ENABLE_LASER_INITIATED)
+ ql_log(ql_log_warn, vha, 0xf0a3,
+ "SFP temperature (%d C) reached normal operating level. Laser is enabled.\n",
+ mb3);
+}
+
static void
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
{
@@ -823,6 +839,135 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
}
}
+/**
+ * qla27xx_copy_multiple_pkt() - Copy over purex/purls packets that can
+ * span over multiple IOCBs.
+ * @vha: SCSI driver HA context
+ * @pkt: ELS packet
+ * @rsp: Response queue
+ * @is_purls: True, for Unsolicited Received FC-NVMe LS rsp IOCB
+ * false, for Unsolicited Received ELS IOCB
+ * @byte_order: True, to change the byte ordering of iocb payload
+ */
+struct purex_item *
+qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, void **pkt,
+ struct rsp_que **rsp, bool is_purls,
+ bool byte_order)
+{
+ struct purex_entry_24xx *purex = NULL;
+ struct pt_ls4_rx_unsol *purls = NULL;
+ struct rsp_que *rsp_q = *rsp;
+ sts_cont_entry_t *new_pkt;
+ uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
+ uint16_t buffer_copy_offset = 0, payload_size = 0;
+ uint16_t entry_count, entry_count_remaining;
+ struct purex_item *item;
+ void *iocb_pkt = NULL;
+
+ if (is_purls) {
+ purls = *pkt;
+ total_bytes = (le16_to_cpu(purls->frame_size) & 0x0FFF) -
+ PURX_ELS_HEADER_SIZE;
+ entry_count = entry_count_remaining = purls->entry_count;
+ payload_size = sizeof(purls->payload);
+ } else {
+ purex = *pkt;
+ total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) -
+ PURX_ELS_HEADER_SIZE;
+ entry_count = entry_count_remaining = purex->entry_count;
+ payload_size = sizeof(purex->els_frame_payload);
+ }
+
+ pending_bytes = total_bytes;
+ no_bytes = (pending_bytes > payload_size) ? payload_size :
+ pending_bytes;
+ ql_dbg(ql_dbg_async, vha, 0x509a,
+ "%s LS, frame_size 0x%x, entry count %d\n",
+ (is_purls ? "PURLS" : "FPIN"), total_bytes, entry_count);
+
+ item = qla24xx_alloc_purex_item(vha, total_bytes);
+ if (!item)
+ return item;
+
+ iocb_pkt = &item->iocb;
+
+ if (is_purls)
+ memcpy(iocb_pkt, &purls->payload[0], no_bytes);
+ else
+ memcpy(iocb_pkt, &purex->els_frame_payload[0], no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+
+ if (is_purls)
+ ((response_t *)purls)->signature = RESPONSE_PROCESSED;
+ else
+ ((response_t *)purex)->signature = RESPONSE_PROCESSED;
+ wmb();
+
+ do {
+ while ((total_bytes > 0) && (entry_count_remaining > 0)) {
+ if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
+ ql_dbg(ql_dbg_async, vha, 0x5084,
+ "Ran out of IOCBs, partial data 0x%x\n",
+ buffer_copy_offset);
+ cpu_relax();
+ continue;
+ }
+
+ new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ if (new_pkt->entry_type != STATUS_CONT_TYPE) {
+ ql_log(ql_log_warn, vha, 0x507a,
+ "Unexpected IOCB type, partial data 0x%x\n",
+ buffer_copy_offset);
+ break;
+ }
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+ no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+ sizeof(new_pkt->data) : pending_bytes;
+ if ((buffer_copy_offset + no_bytes) <= total_bytes) {
+ memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
+ new_pkt->data, no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+ } else {
+ ql_log(ql_log_warn, vha, 0x5044,
+ "Attempt to copy more that we got, optimizing..%x\n",
+ buffer_copy_offset);
+ memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
+ new_pkt->data,
+ total_bytes - buffer_copy_offset);
+ }
+
+ ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
+ wmb();
+ }
+
+ if (pending_bytes != 0 || entry_count_remaining != 0) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
+ total_bytes, entry_count_remaining);
+ qla24xx_free_purex_item(item);
+ return NULL;
+ }
+ } while (entry_count_remaining > 0);
+
+ if (byte_order)
+ host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
+
+ return item;
+}
+
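
qla27xx_copy_multiple_pkt() reassembles an unsolicited ELS (FPIN) or FC-NVMe LS (PURLS) payload that is larger than one IOCB: the first PUREX/PURLS entry carries its inline payload, and the remainder arrives in status-continuation entries that are copied into the purex_item buffer as they are consumed from the response ring. A small helper showing the continuation count implied by that layout (capacities passed in, since they differ between the two IOCB types):

/* Sketch: number of status-continuation IOCBs needed for a frame of
 * total_bytes, given the first entry's inline capacity and the per-
 * continuation data[] capacity. */
static unsigned int cont_iocbs_needed(unsigned int total_bytes,
				      unsigned int first_payload,
				      unsigned int cont_payload)
{
	if (total_bytes <= first_payload)
		return 0;
	return (total_bytes - first_payload + cont_payload - 1) / cont_payload;
}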
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
@@ -958,7 +1103,7 @@ initialize_purex_header:
return item;
}
-static void
+void
qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
void (*process_item)(struct scsi_qla_host *vha,
struct purex_item *pkt))
@@ -1798,6 +1943,8 @@ global_port_update:
break;
case MBA_TEMPERATURE_ALERT:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ display_Laser_info(vha, mb[1], mb[2], mb[3]);
ql_dbg(ql_dbg_async, vha, 0x505e,
"TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
break;
@@ -3811,6 +3958,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct purex_entry_24xx *purex_entry;
struct purex_item *pure_item;
+ struct pt_ls4_rx_unsol *p;
u16 rsp_in = 0, cur_ring_index;
int is_shadow_hba;
@@ -3983,7 +4131,19 @@ process_err:
qla28xx_sa_update_iocb_entry(vha, rsp->req,
(struct sa_update_28xx *)pkt);
break;
-
+ case PT_LS4_UNSOL:
+ p = (void *)pkt;
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
+ rsp->ring_ptr = (response_t *)pkt;
+ rsp->ring_index = cur_ring_index;
+
+ ql_dbg(ql_dbg_init, vha, 0x2124,
+ "Defer processing UNSOL LS req opcode %#x...\n",
+ p->payload[0]);
+ return;
+ }
+ qla2xxx_process_purls_iocb((void **)&pkt, &rsp);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b05f93037875..21ec32b4fb28 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -273,7 +273,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
- atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -290,7 +289,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
atomic_dec(&ha->num_pend_mbx_stage2);
- atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
goto premature_exit;
}
@@ -302,11 +300,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
atomic_dec(&ha->num_pend_mbx_stage2);
- atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
goto premature_exit;
}
- atomic_dec(&ha->num_pend_mbx_stage3);
if (time_after(jiffies, wait_time + 5 * HZ))
ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 9941b38eac93..db753d712991 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -12,6 +12,26 @@
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
+static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
+ struct qla_qpair *qp,
+ struct qla_nvme_lsrjt_pt_arg *a,
+ bool is_xchg_terminate);
+
+struct qla_nvme_unsol_ctx {
+ struct list_head elem;
+ struct scsi_qla_host *vha;
+ struct fc_port *fcport;
+ struct srb *sp;
+ struct nvmefc_ls_rsp lsrsp;
+ struct nvmefc_ls_rsp *fd_rsp;
+ struct work_struct lsrsp_work;
+ struct work_struct abort_work;
+ __le32 exchange_address;
+ __le16 nport_handle;
+ __le16 ox_id;
+ int comp_status;
+ spinlock_t cmd_lock;
+};
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
@@ -216,6 +236,55 @@ static void qla_nvme_sp_ls_done(srb_t *sp, int res)
schedule_work(&priv->ls_work);
}
+static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct qla_nvme_unsol_ctx *uctx = sp->priv;
+ struct nvmefc_ls_rsp *fd_rsp;
+ unsigned long flags;
+
+ if (!uctx) {
+ qla2x00_rel_sp(sp);
+ return;
+ }
+
+ spin_lock_irqsave(&uctx->cmd_lock, flags);
+ uctx->sp = NULL;
+ sp->priv = NULL;
+ spin_unlock_irqrestore(&uctx->cmd_lock, flags);
+
+ fd_rsp = uctx->fd_rsp;
+
+ list_del(&uctx->elem);
+
+ fd_rsp->done(fd_rsp);
+ kfree(uctx);
+ qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_lsrsp_complete(struct work_struct *work)
+{
+ struct qla_nvme_unsol_ctx *uctx =
+ container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);
+
+ kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref);
+}
+
+static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res)
+{
+ struct qla_nvme_unsol_ctx *uctx = sp->priv;
+
+ if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
+ return;
+
+ if (res)
+ res = -EINVAL;
+
+ uctx->comp_status = res;
+ INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete);
+ schedule_work(&uctx->lsrsp_work);
+}
+
/* it assumed that QPair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
@@ -288,6 +357,92 @@ out:
kref_put(&sp->cmd_kref, sp->put_fn);
}
+static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport,
+ struct nvmefc_ls_rsp *fd_resp)
+{
+ struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp,
+ struct qla_nvme_unsol_ctx, lsrsp);
+ struct qla_nvme_rport *qla_rport = rport->private;
+ fc_port_t *fcport = qla_rport->fcport;
+ struct scsi_qla_host *vha = uctx->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_nvme_lsrjt_pt_arg a;
+ struct srb_iocb *nvme;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+ uint8_t cnt = 0;
+
+ if (!fcport || fcport->deleted)
+ goto out;
+
+ if (!ha->flags.fw_started)
+ goto out;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto out;
+
+ sp->type = SRB_NVME_LS;
+ sp->name = "nvme_ls";
+ sp->done = qla_nvme_sp_lsrsp_done;
+ sp->put_fn = qla_nvme_release_lsrsp_cmd_kref;
+ sp->priv = (void *)uctx;
+ sp->unsol_rsp = 1;
+ uctx->sp = sp;
+ spin_lock_init(&uctx->cmd_lock);
+ nvme = &sp->u.iocb_cmd;
+ uctx->fd_rsp = fd_resp;
+ nvme->u.nvme.desc = fd_resp;
+ nvme->u.nvme.dir = 0;
+ nvme->u.nvme.dl = 0;
+ nvme->u.nvme.timeout_sec = 0;
+ nvme->u.nvme.cmd_dma = fd_resp->rspdma;
+ nvme->u.nvme.cmd_len = fd_resp->rsplen;
+ nvme->u.nvme.rsp_len = 0;
+ nvme->u.nvme.rsp_dma = 0;
+ nvme->u.nvme.exchange_address = uctx->exchange_address;
+ nvme->u.nvme.nport_handle = uctx->nport_handle;
+ nvme->u.nvme.ox_id = uctx->ox_id;
+ dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE);
+
+ ql_dbg(ql_dbg_unsol, vha, 0x2122,
+ "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
+ fcport->d_id.b24, fcport->port_name, uctx->exchange_address,
+ uctx->ox_id, uctx->nport_handle);
+retry:
+ rval = qla2x00_start_sp(sp);
+ switch (rval) {
+ case QLA_SUCCESS:
+ break;
+ case EAGAIN:
+ msleep(PURLS_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < PURLS_RETRY_COUNT)
+ goto retry;
+
+ fallthrough;
+ default:
+ ql_dbg(ql_log_warn, vha, 0x2123,
+ "Failed to xmit Unsol ls response = %d\n", rval);
+ rval = -EIO;
+ qla2x00_rel_sp(sp);
+ goto out;
+ }
+
+ return 0;
+out:
+ memset((void *)&a, 0, sizeof(a));
+ a.vp_idx = vha->vp_idx;
+ a.nport_handle = uctx->nport_handle;
+ a.xchg_address = uctx->exchange_address;
+ qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
+ kfree(uctx);
+ return rval;
+}
+
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
@@ -355,7 +510,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
nvme->u.nvme.timeout_sec = fd->timeout;
nvme->u.nvme.cmd_dma = fd->rqstdma;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
- fd->rqstlen, DMA_TO_DEVICE);
+ le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -720,6 +875,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
+ .xmt_ls_rsp = qla_nvme_xmt_ls_rsp,
.map_queues = qla_nvme_map_queues,
.max_hw_queues = DEF_NVME_HW_QUEUES,
.max_sgl_segments = 1024,
@@ -924,3 +1080,247 @@ inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
return;
kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}
+
+static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason,
+ u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ rjt->w0.ls_cmd = FCNVME_LSDESC_RQST;
+ rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));
+ rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ rjt->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ rjt->rqst.w0.ls_cmd = ls_cmd;
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+}
+
+static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
+ struct pt_ls4_request *lsrjt_iocb,
+ struct qla_nvme_lsrjt_pt_arg *a)
+{
+ lsrjt_iocb->entry_type = PT_LS4_REQUEST;
+ lsrjt_iocb->entry_count = 1;
+ lsrjt_iocb->sys_define = 0;
+ lsrjt_iocb->entry_status = 0;
+ lsrjt_iocb->handle = QLA_SKIP_HANDLE;
+ lsrjt_iocb->nport_handle = a->nport_handle;
+ lsrjt_iocb->exchange_address = a->xchg_address;
+ lsrjt_iocb->vp_index = a->vp_idx;
+
+ lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags);
+
+ put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address);
+ lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count);
+ lsrjt_iocb->tx_dseg_count = cpu_to_le16(1);
+ lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
+
+ put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address);
+ lsrjt_iocb->dsd[1].length = 0;
+ lsrjt_iocb->rx_dseg_count = 0;
+ lsrjt_iocb->rx_byte_count = 0;
+}
+
+static int
+qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
+ struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate)
+{
+ struct pt_ls4_request *lsrjt_iocb;
+
+ lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
+ if (!lsrjt_iocb) {
+ ql_log(ql_log_warn, vha, 0x210e,
+ "qla2x00_alloc_iocbs failed.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (!is_xchg_terminate) {
+ qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
+ a->reason, a->explanation, 0);
+
+ a->tx_byte_count = sizeof(struct fcnvme_ls_rjt);
+ a->tx_addr = vha->hw->lsrjt.cdma;
+ a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT;
+
+ ql_dbg(ql_dbg_unsol, vha, 0x211f,
+ "Sending nvme fc ls reject ox_id %04x op %04x\n",
+ a->ox_id, a->opcode);
+ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
+ vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
+ } else {
+ a->tx_byte_count = 0;
+ a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT;
+ ql_dbg(ql_dbg_unsol, vha, 0x2110,
+ "Terminate nvme ls xchg 0x%x\n", a->xchg_address);
+ }
+
+ qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
+ /* flush iocb to mem before notifying hw doorbell */
+ wmb();
+ qla2x00_start_iocbs(vha, qp->req);
+ return 0;
+}
+
+/*
+ * qla2xxx_process_purls_pkt() - Pass-up Unsolicited
+ * Received FC-NVMe Link Service pkt to nvme_fc_rcv_ls_req().
+ * LLDD need to provide memory for response buffer, which
+ * will be used to reference the exchange corresponding
+ * to the LS when issuing an ls response. LLDD will have to free
+ * response buffer in lport->ops->xmt_ls_rsp().
+ *
+ * @vha: SCSI qla host
+ * @item: ptr to purex_item
+ */
+static void
+qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
+{
+ struct qla_nvme_unsol_ctx *uctx = item->purls_context;
+ struct qla_nvme_lsrjt_pt_arg a;
+ int ret = 1;
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp,
+ &item->iocb, item->size);
+#endif
+ if (ret) {
+ ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n");
+ memset((void *)&a, 0, sizeof(a));
+ a.vp_idx = vha->vp_idx;
+ a.nport_handle = uctx->nport_handle;
+ a.xchg_address = uctx->exchange_address;
+ qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
+ list_del(&uctx->elem);
+ kfree(uctx);
+ }
+}
+
+static scsi_qla_host_t *
+qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index)
+{
+ scsi_qla_host_t *base_vha, *vha, *tvp;
+ unsigned long flags;
+
+ base_vha = pci_get_drvdata(ha->pdev);
+
+ if (!vp_index && !ha->num_vhosts)
+ return base_vha;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
+ if (vha->vp_idx == vp_index) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ return vha;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return NULL;
+}
+
+void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
+{
+ struct nvme_fc_remote_port *rport;
+ struct qla_nvme_rport *qla_rport;
+ struct qla_nvme_lsrjt_pt_arg a;
+ struct pt_ls4_rx_unsol *p = *pkt;
+ struct qla_nvme_unsol_ctx *uctx;
+ struct rsp_que *rsp_q = *rsp;
+ struct qla_hw_data *ha;
+ scsi_qla_host_t *vha;
+ fc_port_t *fcport = NULL;
+ struct purex_item *item;
+ port_id_t d_id = {0};
+ port_id_t id = {0};
+ u8 *opcode;
+ bool xmt_reject = false;
+
+ ha = rsp_q->hw;
+
+ vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
+ if (!vha) {
+ ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ memset((void *)&a, 0, sizeof(a));
+ opcode = (u8 *)&p->payload[0];
+ a.opcode = opcode[3];
+ a.vp_idx = p->vp_index;
+ a.nport_handle = p->nport_handle;
+ a.ox_id = p->ox_id;
+ a.xchg_address = p->exchange_address;
+
+ id.b.domain = p->s_id.domain;
+ id.b.area = p->s_id.area;
+ id.b.al_pa = p->s_id.al_pa;
+ d_id.b.domain = p->d_id[2];
+ d_id.b.area = p->d_id[1];
+ d_id.b.al_pa = p->d_id[0];
+
+ fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
+ if (!fcport) {
+ ql_dbg(ql_dbg_unsol, vha, 0x211e,
+ "Failed to find sid=%06x did=%06x\n",
+ id.b24, d_id.b24);
+ a.reason = FCNVME_RJT_RC_INV_ASSOC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ goto out;
+ }
+ rport = fcport->nvme_remote_port;
+ qla_rport = rport->private;
+
+ item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
+ if (!item) {
+ a.reason = FCNVME_RJT_RC_LOGIC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ goto out;
+ }
+
+ uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);
+ if (!uctx) {
+ ql_log(ql_log_info, vha, 0x2126, "Failed allocate memory\n");
+ a.reason = FCNVME_RJT_RC_LOGIC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ kfree(item);
+ goto out;
+ }
+
+ uctx->vha = vha;
+ uctx->fcport = fcport;
+ uctx->exchange_address = p->exchange_address;
+ uctx->nport_handle = p->nport_handle;
+ uctx->ox_id = p->ox_id;
+ qla_rport->uctx = uctx;
+ INIT_LIST_HEAD(&uctx->elem);
+ list_add_tail(&uctx->elem, &fcport->unsol_ctx_head);
+ item->purls_context = (void *)uctx;
+
+ ql_dbg(ql_dbg_unsol, vha, 0x2121,
+ "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
+ item->iocb.iocb[3], item->size, uctx->exchange_address,
+ fcport->d_id.b24);
+ /* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ * ----- -----------------------------------------------
+ * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00
+ * 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00
+ * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ */
+ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
+ &item->iocb, item->size);
+
+ qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
+out:
+ if (xmt_reject) {
+ qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
+ __qla_consume_iocb(vha, pkt, rsp);
+ }
+}
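
Taken together, the qla_nvme.c additions wire up unsolicited FC-NVMe Link Service handling: qla2xxx_process_purls_iocb() matches the PURLS source ID to an fcport, copies the payload into a purex_item, attaches a qla_nvme_unsol_ctx and queues the item; qla2xxx_process_purls_pkt() then hands the payload to nvme_fc_rcv_ls_req(), and the transport answers through the new .xmt_ls_rsp hook (qla_nvme_xmt_ls_rsp), which sends a PT_LS4 responder IOCB on the original exchange. Any failure along the way falls back to qla_nvme_ls_reject_iocb(), which either transmits an LS_RJT or just terminates the exchange. The EAGAIN handling in qla_nvme_xmt_ls_rsp() is a short bounded retry, roughly:

/* Sketch of the bounded retry when the request ring is momentarily full;
 * interval and count correspond to PURLS_MSLEEP_INTERVAL and
 * PURLS_RETRY_COUNT in the qla_nvme.h hunk below. */
static int start_sp_with_retry(srb_t *sp)
{
	int tries, rval;

	for (tries = 0; tries < 5; tries++) {
		rval = qla2x00_start_sp(sp);
		if (rval != EAGAIN)
			break;
		msleep(1);	/* let the ring drain before retrying */
	}
	return rval;
}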
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index d299478371b2..a253ac55171b 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -21,6 +21,7 @@
#define Q2T_NVME_NUM_TAGS 2048
#define QLA_MAX_FC_SEGMENTS 64
+struct qla_nvme_unsol_ctx;
struct scsi_qla_host;
struct qla_hw_data;
struct req_que;
@@ -37,6 +38,7 @@ struct nvme_private {
struct qla_nvme_rport {
struct fc_port *fcport;
+ struct qla_nvme_unsol_ctx *uctx;
};
#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */
@@ -75,6 +77,9 @@ struct cmd_nvme {
struct dsd64 nvme_dsd;
};
+#define PURLS_MSLEEP_INTERVAL 1
+#define PURLS_RETRY_COUNT 5
+
#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
struct pt_ls4_request {
uint8_t entry_type;
@@ -118,21 +123,19 @@ struct pt_ls4_rx_unsol {
__le32 exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
- be_id_t s_id;
+ le_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
__le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- __le16 rx_id;
- __le16 ox_id;
- __le32 param;
- __le32 desc0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
- __le32 desc_len;
- __le32 payload[3];
+ __le32 payload[5];
};
/*
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6dc80c8ddf79..5d1bdc15b75c 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -857,7 +857,9 @@ struct fcp_cmnd {
uint8_t task_attribute;
uint8_t task_management;
uint8_t additional_cdb_len;
- uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */
+#define QLA_CDB_BUF_SIZE 256
+#define QLA_FCP_DL_SIZE 4
+ uint8_t cdb[QLA_CDB_BUF_SIZE + QLA_FCP_DL_SIZE]; /* 256 for CDB len and 4 for FCP_DL */
};
struct dsd_dma {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 03bc3a0b45b6..50db08265c51 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -44,10 +44,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
"Set this to take full dump on MPI hang.");
-int ql2xenforce_iocb_limit = 1;
+int ql2xenforce_iocb_limit = 2;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
- "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
+ "1: track usage per queue, 2: track usage per adapter");
/*
* CT6 CTX allocation cache
@@ -432,6 +433,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
ha->base_qpair->srb_mempool = ha->srb_mempool;
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
+ INIT_LIST_HEAD(&ha->base_qpair->dsd_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
qla_cpu_update(rsp->qpair, raw_smp_processor_id());
@@ -750,9 +752,9 @@ void qla2x00_sp_free_dma(srb_t *sp)
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
- list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
+ sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
+ sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
}
if (sp->flags & SRB_GOT_BUF)
@@ -836,9 +838,9 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
- list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
+ sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
+ sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
}
@@ -3007,7 +3009,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->max_exchg = FW_MAX_EXCHANGES_CNT;
atomic_set(&ha->num_pend_mbx_stage1, 0);
atomic_set(&ha->num_pend_mbx_stage2, 0);
- atomic_set(&ha->num_pend_mbx_stage3, 0);
atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
INIT_LIST_HEAD(&ha->tmf_pending);
@@ -3288,6 +3289,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
+
+ if (ql2xenabledif && ql2xenabledif != 2) {
+ ql_log(ql_log_warn, base_vha, 0x302d,
+ "Invalid value for ql2xenabledif, resetting it to default (2)\n");
+ ql2xenabledif = 2;
+ }
+
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
@@ -3524,8 +3532,6 @@ skip_dpc:
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
if (ql2xprotmask)
scsi_host_set_prot(host, ql2xprotmask);
else
@@ -4402,7 +4408,6 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"sf_init_cb=%p.\n", ha->sf_init_cb);
}
- INIT_LIST_HEAD(&ha->gbl_dsd_list);
/* Get consistent memory allocated for Async Port-Database. */
if (!IS_FWI2_CAPABLE(ha)) {
@@ -4457,8 +4462,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
- ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
-
+ ha->elsrej.size,
+ &ha->elsrej.cdma,
+ GFP_KERNEL);
if (!ha->elsrej.c) {
ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
"Alloc failed for els reject cmd.\n");
@@ -4467,8 +4473,21 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->elsrej.c->er_cmd = ELS_LS_RJT;
ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
+
+ ha->lsrjt.size = sizeof(struct fcnvme_ls_rjt);
+ ha->lsrjt.c = dma_alloc_coherent(&ha->pdev->dev, ha->lsrjt.size,
+ &ha->lsrjt.cdma, GFP_KERNEL);
+ if (!ha->lsrjt.c) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
+ "Alloc failed for nvme fc reject cmd.\n");
+ goto fail_lsrjt;
+ }
+
return 0;
+fail_lsrjt:
+ dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
+ ha->elsrej.c, ha->elsrej.cdma);
fail_elsrej:
dma_pool_destroy(ha->purex_dma_pool);
fail_flt:
@@ -4934,18 +4953,16 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
- if (IS_QLA82XX(ha)) {
- if (!list_empty(&ha->gbl_dsd_list)) {
- struct dsd_dma *dsd_ptr, *tdsd_ptr;
-
- /* clean up allocated prev pool */
- list_for_each_entry_safe(dsd_ptr,
- tdsd_ptr, &ha->gbl_dsd_list, list) {
- dma_pool_free(ha->dl_dma_pool,
- dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
- list_del(&dsd_ptr->list);
- kfree(dsd_ptr);
- }
+ if (!list_empty(&ha->base_qpair->dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &ha->base_qpair->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
}
}
@@ -5000,6 +5017,12 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->elsrej.c = NULL;
}
+ if (ha->lsrjt.c) {
+ dma_free_coherent(&ha->pdev->dev, ha->lsrjt.size, ha->lsrjt.c,
+ ha->lsrjt.cdma);
+ ha->lsrjt.c = NULL;
+ }
+
ha->init_cb = NULL;
ha->init_cb_dma = 0;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 81bdf6b03241..d903563e969e 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.08.500-k"
+#define QLA2XXX_VERSION "10.02.09.100-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 8
-#define QLA_DRIVER_BETA_VER 500
+#define QLA_DRIVER_PATCH_VER 9
+#define QLA_DRIVER_BETA_VER 100
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index 217b70c678c3..f795848b316c 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/seq_file.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_host.h>
#include "scsi_debugfs.h"
#define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name
@@ -33,14 +34,33 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags,
void scsi_show_rq(struct seq_file *m, struct request *rq)
{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq), *cmd2;
+ struct Scsi_Host *shost = cmd->device->host;
int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
int timeout_ms = jiffies_to_msecs(rq->timeout);
+ const char *list_info = NULL;
char buf[80] = "(?)";
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(cmd2, &shost->eh_abort_list, eh_entry) {
+ if (cmd == cmd2) {
+ list_info = "on eh_abort_list";
+ goto unlock;
+ }
+ }
+ list_for_each_entry(cmd2, &shost->eh_cmd_q, eh_entry) {
+ if (cmd == cmd2) {
+ list_info = "on eh_cmd_q";
+ goto unlock;
+ }
+ }
+unlock:
+ spin_unlock_irq(shost->host_lock);
+
__scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
- seq_printf(m, ", .cmd=%s, .retries=%d, .result = %#x, .flags=", buf,
- cmd->retries, cmd->result);
+ seq_printf(m, ", .cmd=%s, .retries=%d, .allowed=%d, .result = %#x, %s%s.flags=",
+ buf, cmd->retries, cmd->allowed, cmd->result,
+ list_info ? : "", list_info ? ", " : "");
scsi_flags_show(m, cmd->flags, scsi_cmd_flags,
ARRAY_SIZE(scsi_cmd_flags));
seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 59176946ab56..c2f647a7c1b0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2454,7 +2454,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
envp[idx++] = "SDEV_MEDIA_CHANGE=1";
break;
case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
break;
case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 118855576ca8..3f0dfb97db6b 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -137,7 +137,6 @@ extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode);
extern void scsi_forget_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct device *);
/* scsi_sysctl.c */
#ifdef CONFIG_SYSCTL
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index aa13feb17c62..52014b2d39e1 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1619,9 +1619,9 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
}
EXPORT_SYMBOL(scsi_add_device);
-void scsi_rescan_device(struct device *dev)
+void scsi_rescan_device(struct scsi_device *sdev)
{
- struct scsi_device *sdev = to_scsi_device(dev);
+ struct device *dev = &sdev->sdev_gendev;
device_lock(dev);
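A hypothetical caller (illustration only, not part of this series) showing the pattern the call-site updates in the following files apply, now that scsi_rescan_device() takes the scsi_device directly instead of its embedded struct device:

static void example_trigger_rescan(struct scsi_device *sdev)
{
	/* hypothetical helper; previously: scsi_rescan_device(&sdev->sdev_gendev); */
	scsi_rescan_device(sdev);
}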
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 60317676e45f..24f6eefb6803 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -747,7 +747,7 @@ static ssize_t
store_rescan_field (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- scsi_rescan_device(dev);
+ scsi_rescan_device(to_scsi_device(dev));
return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
@@ -840,7 +840,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
* waiting for pending I/O to finish.
*/
blk_mq_run_hw_queues(sdev->request_queue, true);
- scsi_rescan_device(dev);
+ scsi_rescan_device(sdev);
}
return ret == 0 ? count : -EINVAL;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3c668cfb146d..c92a317ba547 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -104,19 +104,7 @@ static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
-static int sd_probe(struct device *);
-static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
-static int sd_suspend_system(struct device *);
-static int sd_suspend_runtime(struct device *);
-static int sd_resume_system(struct device *);
-static int sd_resume_runtime(struct device *);
-static void sd_rescan(struct device *);
-static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
-static void sd_uninit_command(struct scsi_cmnd *SCpnt);
-static int sd_done(struct scsi_cmnd *);
-static void sd_eh_reset(struct scsi_cmnd *);
-static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);
@@ -592,33 +580,6 @@ static struct class sd_disk_class = {
.dev_groups = sd_disk_groups,
};
-static const struct dev_pm_ops sd_pm_ops = {
- .suspend = sd_suspend_system,
- .resume = sd_resume_system,
- .poweroff = sd_suspend_system,
- .restore = sd_resume_system,
- .runtime_suspend = sd_suspend_runtime,
- .runtime_resume = sd_resume_runtime,
-};
-
-static struct scsi_driver sd_template = {
- .gendrv = {
- .name = "sd",
- .owner = THIS_MODULE,
- .probe = sd_probe,
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .remove = sd_remove,
- .shutdown = sd_shutdown,
- .pm = &sd_pm_ops,
- },
- .rescan = sd_rescan,
- .init_command = sd_init_command,
- .uninit_command = sd_uninit_command,
- .done = sd_done,
- .eh_action = sd_eh_action,
- .eh_reset = sd_eh_reset,
-};
-
/*
* Don't request a new module, as that could deadlock in multipath
* environment.
@@ -3929,6 +3890,33 @@ static int sd_resume_runtime(struct device *dev)
return sd_resume(dev);
}
+static const struct dev_pm_ops sd_pm_ops = {
+ .suspend = sd_suspend_system,
+ .resume = sd_resume_system,
+ .poweroff = sd_suspend_system,
+ .restore = sd_resume_system,
+ .runtime_suspend = sd_suspend_runtime,
+ .runtime_resume = sd_resume_runtime,
+};
+
+static struct scsi_driver sd_template = {
+ .gendrv = {
+ .name = "sd",
+ .owner = THIS_MODULE,
+ .probe = sd_probe,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .remove = sd_remove,
+ .shutdown = sd_shutdown,
+ .pm = &sd_pm_ops,
+ },
+ .rescan = sd_rescan,
+ .init_command = sd_init_command,
+ .uninit_command = sd_uninit_command,
+ .done = sd_done,
+ .eh_action = sd_eh_action,
+ .eh_reset = sd_eh_reset,
+};
+
/**
* init_sd - entry point for this driver (both when built in or when
* a module).
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e392eaf5b2bf..041940183516 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -710,7 +710,7 @@ typedef u32 pqi_index_t;
#define SOP_TMF_COMPLETE 0x0
#define SOP_TMF_REJECTED 0x4
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
-#define SOP_RC_INCORRECT_LOGICAL_UNIT 0x9
+#define SOP_TMF_INCORRECT_LOGICAL_UNIT 0x9
/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */
@@ -1085,7 +1085,16 @@ struct pqi_stream_data {
u32 last_accessed;
};
-#define PQI_MAX_LUNS_PER_DEVICE 256
+#define PQI_MAX_LUNS_PER_DEVICE 256
+
+struct pqi_tmf_work {
+ struct work_struct work_struct;
+ struct scsi_cmnd *scmd;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ u8 lun;
+ u8 scsi_opcode;
+};
struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY command */
@@ -1111,6 +1120,7 @@ struct pqi_scsi_dev {
u8 erase_in_progress : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_remove;
+ bool in_reset[PQI_MAX_LUNS_PER_DEVICE];
bool device_offline;
u8 vendor[8]; /* bytes 8-15 of inquiry data */
u8 model[16]; /* bytes 16-31 of inquiry data */
@@ -1149,6 +1159,8 @@ struct pqi_scsi_dev {
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
unsigned int raid_bypass_cnt;
+
+ struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE];
};
/* VPD inquiry pages */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 6aaaa7ebca37..9a58df9312fa 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.22-040"
+#define DRIVER_VERSION "2.1.24-046"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 22
-#define DRIVER_REVISION 40
+#define DRIVER_RELEASE 24
+#define DRIVER_REVISION 46
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -48,6 +48,8 @@
#define PQI_POST_RESET_DELAY_SECS 5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
+#define PQI_NO_COMPLETION ((void *)-1)
+
MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
DRIVER_VERSION);
@@ -96,6 +98,7 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
+static void pqi_tmf_worker(struct work_struct *work);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -455,6 +458,21 @@ static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
return device->in_remove;
}
+static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
+{
+ device->in_reset[lun] = true;
+}
+
+static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
+{
+ device->in_reset[lun] = false;
+}
+
+static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
+{
+ return device->in_reset[lun];
+}
+
static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
int index;
@@ -2137,6 +2155,15 @@ static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
return device->sdev != NULL;
}
+static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
+{
+ unsigned int lun;
+ struct pqi_tmf_work *tmf_work;
+
+ for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
+ INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
+}
+
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
@@ -2217,6 +2244,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
list_add_tail(&device->add_list_entry, &add_list);
/* To prevent this device structure from being freed later. */
device->keep_device = true;
+ pqi_init_device_tmf_work(device);
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -2257,7 +2285,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device->advertised_queue_depth = device->queue_depth;
scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
if (device->rescan) {
- scsi_rescan_device(&device->sdev->sdev_gendev);
+ scsi_rescan_device(device->sdev);
device->rescan = false;
}
}
@@ -3330,7 +3358,7 @@ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_inf
case SOP_TMF_REJECTED:
rc = -EAGAIN;
break;
- case SOP_RC_INCORRECT_LOGICAL_UNIT:
+ case SOP_TMF_INCORRECT_LOGICAL_UNIT:
rc = -ENODEV;
break;
default:
@@ -5628,7 +5656,6 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
int rc;
struct pqi_io_request *io_request;
struct pqi_aio_path_request *request;
- struct pqi_scsi_dev *device;
io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)
@@ -5648,9 +5675,8 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
- device = scmd->device->hostdata;
- if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
- put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
+ if (!raid_bypass && ctrl_info->multi_lun_device_supported)
+ put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
if (cdb_length > sizeof(request->cdb))
cdb_length = sizeof(request->cdb);
request->cdb_length = cdb_length;
@@ -5850,6 +5876,7 @@ static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
struct pqi_scsi_dev *device;
+ struct completion *wait;
if (!scmd->device) {
set_host_byte(scmd, DID_NO_CONNECT);
@@ -5863,6 +5890,10 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
}
atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
+
+ wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
+ if (wait != PQI_NO_COMPLETION)
+ complete(wait);
}
static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
@@ -5948,6 +5979,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
u16 hw_queue;
struct pqi_queue_group *queue_group;
bool raid_bypassed;
+ u8 lun;
+
+ scmd->host_scribble = PQI_NO_COMPLETION;
device = scmd->device->hostdata;
@@ -5957,7 +5991,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}
- atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
+ lun = (u8)scmd->device->lun;
+
+ atomic_inc(&device->scsi_cmds_outstanding[lun]);
ctrl_info = shost_to_hba(shost);
@@ -5967,7 +6003,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}
- if (pqi_ctrl_blocked(ctrl_info)) {
+ if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -6002,8 +6038,10 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
}
out:
- if (rc)
- atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
+ if (rc) {
+ scmd->host_scribble = NULL;
+ atomic_dec(&device->scsi_cmds_outstanding[lun]);
+ }
return rc;
}
@@ -6097,7 +6135,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
}
static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+ struct pqi_scsi_dev *device, u8 lun)
{
unsigned int i;
unsigned int path;
@@ -6127,6 +6165,9 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
if (scsi_device != device)
continue;
+ if ((u8)scmd->device->lun != lun)
+ continue;
+
list_del(&io_request->request_list_entry);
set_host_byte(scmd, DID_RESET);
pqi_free_io_request(io_request);
@@ -6224,15 +6265,13 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int rc;
struct pqi_io_request *io_request;
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
- struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info, NULL);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -6247,7 +6286,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
- request->ml_device_lun_number = (u8)scmd->device->lun;
+ request->ml_device_lun_number = lun;
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
if (ctrl_info->tmf_iu_timeout_supported)
put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
@@ -6255,7 +6294,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
+ rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
if (rc == 0)
rc = io_request->status;
@@ -6269,18 +6308,16 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
-static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int reset_rc;
int wait_rc;
unsigned int retries;
unsigned long timeout_msecs;
- struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
for (retries = 0;;) {
- reset_rc = pqi_lun_reset(ctrl_info, scmd);
- if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
+ reset_rc = pqi_lun_reset(ctrl_info, device, lun);
+ if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -6288,60 +6325,51 @@ static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct sc
timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
- wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
+ wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
if (wait_rc && reset_rc == 0)
reset_rc = wait_rc;
return reset_rc == 0 ? SUCCESS : FAILED;
}
-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int rc;
- struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_fail_io_queued_for_device(ctrl_info, device);
+ pqi_fail_io_queued_for_device(ctrl_info, device, lun);
rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
+ pqi_device_reset_start(device, lun);
+ pqi_ctrl_unblock_requests(ctrl_info);
if (rc)
rc = FAILED;
else
- rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
- pqi_ctrl_unblock_requests(ctrl_info);
+ rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
+ pqi_device_reset_done(device, lun);
return rc;
}
-static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
+static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
{
int rc;
- struct Scsi_Host *shost;
- struct pqi_ctrl_info *ctrl_info;
- struct pqi_scsi_dev *device;
-
- shost = scmd->device->host;
- ctrl_info = shost_to_hba(shost);
- device = scmd->device->hostdata;
mutex_lock(&ctrl_info->lun_reset_mutex);
dev_err(&ctrl_info->pci_dev->dev,
- "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
- shost->host_no,
- device->bus, device->target, (u32)scmd->device->lun,
- scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
+ "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
else
- rc = pqi_device_reset(ctrl_info, scmd);
+ rc = pqi_device_reset(ctrl_info, device, lun);
dev_err(&ctrl_info->pci_dev->dev,
- "reset of scsi %d:%d:%d:%d: %s\n",
- shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
+ "reset of scsi %d:%d:%d:%u: %s\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&ctrl_info->lun_reset_mutex);
@@ -6349,6 +6377,77 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
return rc;
}
+static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ u8 scsi_opcode;
+
+ shost = scmd->device->host;
+ ctrl_info = shost_to_hba(shost);
+ device = scmd->device->hostdata;
+ scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
+
+ return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
+}
+
+static void pqi_tmf_worker(struct work_struct *work)
+{
+ struct pqi_tmf_work *tmf_work;
+ struct scsi_cmnd *scmd;
+
+ tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
+ scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
+
+ pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
+}
+
+static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ struct pqi_tmf_work *tmf_work;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ shost = scmd->device->host;
+ ctrl_info = shost_to_hba(shost);
+ device = scmd->device->hostdata;
+
+ dev_err(&ctrl_info->pci_dev->dev,
+ "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
+
+ if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
+ scmd->result = DID_RESET << 16;
+ goto out;
+ }
+
+ tmf_work = &device->tmf_work[scmd->device->lun];
+
+ if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
+ tmf_work->ctrl_info = ctrl_info;
+ tmf_work->device = device;
+ tmf_work->lun = (u8)scmd->device->lun;
+ tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
+ schedule_work(&tmf_work->work_struct);
+ }
+
+ wait_for_completion(&wait);
+
+ dev_err(&ctrl_info->pci_dev->dev,
+ "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
+
+out:
+
+ return SUCCESS;
+}
+
static int pqi_slave_alloc(struct scsi_device *sdev)
{
struct pqi_scsi_dev *device;
@@ -6470,21 +6569,21 @@ static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *ar
struct pci_dev *pci_dev;
u32 subsystem_vendor;
u32 subsystem_device;
- cciss_pci_info_struct pciinfo;
+ cciss_pci_info_struct pci_info;
if (!arg)
return -EINVAL;
pci_dev = ctrl_info->pci_dev;
- pciinfo.domain = pci_domain_nr(pci_dev->bus);
- pciinfo.bus = pci_dev->bus->number;
- pciinfo.dev_fn = pci_dev->devfn;
+ pci_info.domain = pci_domain_nr(pci_dev->bus);
+ pci_info.bus = pci_dev->bus->number;
+ pci_info.dev_fn = pci_dev->devfn;
subsystem_vendor = pci_dev->subsystem_vendor;
subsystem_device = pci_dev->subsystem_device;
- pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
+ pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
- if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
+ if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
return -EFAULT;
return 0;
@@ -7362,6 +7461,7 @@ static const struct scsi_host_template pqi_driver_template = {
.scan_finished = pqi_scan_finished,
.this_id = -1,
.eh_device_reset_handler = pqi_eh_device_reset_handler,
+ .eh_abort_handler = pqi_eh_abort_handler,
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
@@ -8904,6 +9004,52 @@ static void pqi_ctrl_offline_worker(struct work_struct *work)
pqi_take_ctrl_offline_deferred(ctrl_info);
}
+static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
+{
+ char *string;
+
+ switch (ctrl_shutdown_reason) {
+ case PQI_IQ_NOT_DRAINED_TIMEOUT:
+ string = "inbound queue not drained timeout";
+ break;
+ case PQI_LUN_RESET_TIMEOUT:
+ string = "LUN reset timeout";
+ break;
+ case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
+ string = "I/O pending timeout after LUN reset";
+ break;
+ case PQI_NO_HEARTBEAT:
+ string = "no controller heartbeat detected";
+ break;
+ case PQI_FIRMWARE_KERNEL_NOT_UP:
+ string = "firmware kernel not ready";
+ break;
+ case PQI_OFA_RESPONSE_TIMEOUT:
+ string = "OFA response timeout";
+ break;
+ case PQI_INVALID_REQ_ID:
+ string = "invalid request ID";
+ break;
+ case PQI_UNMATCHED_REQ_ID:
+ string = "unmatched request ID";
+ break;
+ case PQI_IO_PI_OUT_OF_RANGE:
+ string = "I/O queue producer index out of range";
+ break;
+ case PQI_EVENT_PI_OUT_OF_RANGE:
+ string = "event queue producer index out of range";
+ break;
+ case PQI_UNEXPECTED_IU_TYPE:
+ string = "unexpected IU type";
+ break;
+ default:
+ string = "unknown reason";
+ break;
+ }
+
+ return string;
+}
+
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
@@ -8916,7 +9062,9 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
if (!pqi_disable_ctrl_shutdown)
sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
pci_disable_device(ctrl_info->pci_dev);
- dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller offline: reason code 0x%x (%s)\n",
+ ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
schedule_work(&ctrl_info->ctrl_offline_work);
}
@@ -9062,7 +9210,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
rc = pqi_flush_cache(ctrl_info, shutdown_event);
if (rc)
dev_err(&pci_dev->dev,
- "unable to flush controller cache\n");
+ "unable to flush controller cache during shutdown\n");
pqi_crash_if_pending_command(ctrl_info);
pqi_reset(ctrl_info);
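The new abort handler synchronizes with the completion path only through scmd->host_scribble: queuecommand stores the PQI_NO_COMPLETION sentinel, pqi_prep_for_scsi_done() exchanges the field with NULL and completes any waiter it finds, and pqi_eh_abort_handler() installs its on-stack completion only if the sentinel is still present. A minimal userspace sketch of that handshake, with made-up names and a simplified completion type (not driver code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NO_COMPLETION ((void *)-1)	/* sentinel stored at submit time */

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

struct cmd {
	_Atomic(void *) scribble;	/* NO_COMPLETION, &wait, or NULL */
};

/* Completion path: take ownership of the field and wake an aborter, if any. */
static void *io_done(void *arg)
{
	struct cmd *cmd = arg;
	void *wait = atomic_exchange(&cmd->scribble, NULL);

	if (wait && wait != NO_COMPLETION)
		complete(wait);
	return NULL;
}

/* Abort path: install a completion unless the command already finished. */
static void abort_cmd(struct cmd *cmd)
{
	struct completion wait = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
	void *expected = NO_COMPLETION;

	if (!atomic_compare_exchange_strong(&cmd->scribble, &expected, (void *)&wait)) {
		printf("command already completed\n");
		return;
	}
	wait_for_completion(&wait);
	printf("abort observed the completion\n");
}

int main(void)
{
	struct cmd cmd = { .scribble = NO_COMPLETION };
	pthread_t io_thread;

	pthread_create(&io_thread, NULL, io_done, &cmd);
	abort_cmd(&cmd);
	pthread_join(io_thread, NULL);
	return 0;
}

Whichever thread's atomic operation lands first wins the race, so the aborter either waits for the completion or sees that the command already finished, mirroring the cmpxchg()/xchg() pair in the driver code above.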
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 14d7981ddcdd..338aa8c42968 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -414,6 +414,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (cmdstatp->have_sense &&
cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17)
STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */
+ if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29)
+ STp->pos_unknown = 1; /* ASC => power on / reset */
STp->pos_unknown |= STp->device->was_reset;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 068625556dda..a95936b18f69 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -475,7 +475,7 @@ static void storvsc_device_scan(struct work_struct *work)
sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
if (!sdev)
goto done;
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
done:
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index bd5633667d01..9d1bdcdc1331 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -325,7 +325,7 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
/* Handle "Parameters changed", "Mode parameters changed", and
"Capacity data has changed". */
if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
}
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index caae61aa2afe..9ec55ddc1204 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -743,7 +743,7 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
if (info->host_active == STATE_ERROR)
return -EIO;
- if (info && current == info->curr) {
+ if (current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
if (err) {
@@ -761,7 +761,7 @@ static void scsifront_sdev_destroy(struct scsi_device *sdev)
struct vscsifrnt_info *info = shost_priv(sdev->host);
int err;
- if (info && current == info->curr) {
+ if (current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateClosed);
if (err)
@@ -903,7 +903,7 @@ static int scsifront_probe(struct xenbus_device *dev,
xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
return err;
}
- info = (struct vscsifrnt_info *)host->hostdata;
+ info = shost_priv(host);
dev_set_drvdata(&dev->dev, info);
info->dev = dev;
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index de31589ed054..5a75ab64d1ed 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -334,6 +334,11 @@ if RISCV
config ARCH_R9A07G043
bool "RISC-V Platform support for RZ/Five"
select ARCH_RZG2L
+ select AX45MP_L2_CACHE if RISCV_DMA_NONCOHERENT
+ select DMA_GLOBAL_POOL
+ select ERRATA_ANDES if RISCV_SBI
+ select ERRATA_ANDES_CMO if ERRATA_ANDES
+
help
This enables support for the Renesas RZ/Five SoC.
diff --git a/drivers/staging/media/av7110/sp8870.c b/drivers/staging/media/av7110/sp8870.c
index 9767159aeb9b..abf5c72607b6 100644
--- a/drivers/staging/media/av7110/sp8870.c
+++ b/drivers/staging/media/av7110/sp8870.c
@@ -606,4 +606,4 @@ MODULE_DESCRIPTION("Spase SP8870 DVB-T Demodulator driver");
MODULE_AUTHOR("Juergen Peitz");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(sp8870_attach);
+EXPORT_SYMBOL_GPL(sp8870_attach);
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 9f6dc4fc9112..f00765bfc22e 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -876,8 +876,9 @@ static int armada_thermal_probe(struct platform_device *pdev)
/* Wait the sensors to be valid */
armada_wait_sensor_validity(priv);
- tz = thermal_zone_device_register(priv->zone_name, 0, 0, priv,
- &legacy_ops, NULL, 0, 0);
+ tz = thermal_tripless_zone_device_register(priv->zone_name,
+ priv, &legacy_ops,
+ NULL);
if (IS_ERR(tz)) {
dev_err(&pdev->dev,
"Failed to register thermal zone device\n");
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 9954040d1d2c..7a18cb960bee 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -139,8 +139,8 @@ static int dove_thermal_probe(struct platform_device *pdev)
return ret;
}
- thermal = thermal_zone_device_register("dove_thermal", 0, 0,
- priv, &ops, NULL, 0, 0);
+ thermal = thermal_tripless_zone_device_register("dove_thermal", priv,
+ &ops, NULL);
if (IS_ERR(thermal)) {
dev_err(&pdev->dev,
"Failed to register thermal zone device\n");
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index ddd600820f68..ffc2871a021c 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -609,9 +609,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
evaluate_odvp(priv);
- priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
- priv, &int3400_thermal_ops,
- &int3400_thermal_params, 0, 0);
+ priv->thermal = thermal_tripless_zone_device_register("INT3400 Thermal", priv,
+ &int3400_thermal_ops,
+ &int3400_thermal_params);
if (IS_ERR(priv->thermal)) {
result = PTR_ERR(priv->thermal);
goto free_art_trt;
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 668747bd86ef..acb10d24256d 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -71,8 +71,8 @@ static int kirkwood_thermal_probe(struct platform_device *pdev)
if (IS_ERR(priv->sensor))
return PTR_ERR(priv->sensor);
- thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
- priv, &ops, NULL, 0, 0);
+ thermal = thermal_tripless_zone_device_register("kirkwood_thermal",
+ priv, &ops, NULL);
if (IS_ERR(thermal)) {
dev_err(&pdev->dev,
"Failed to register thermal zone device\n");
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 6e78616a576e..96d99289799a 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -122,8 +122,8 @@ static int spear_thermal_probe(struct platform_device *pdev)
stdev->flags = val;
writel_relaxed(stdev->flags, stdev->thermal_base);
- spear_thermal = thermal_zone_device_register("spear_thermal", 0, 0,
- stdev, &ops, NULL, 0, 0);
+ spear_thermal = thermal_tripless_zone_device_register("spear_thermal",
+ stdev, &ops, NULL);
if (IS_ERR(spear_thermal)) {
dev_err(&pdev->dev, "thermal zone device is NULL\n");
ret = PTR_ERR(spear_thermal);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 0bdde1ab5d8b..8717a3343512 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1389,16 +1389,16 @@ free_tz:
}
EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);
-struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
- void *devdata, struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp, int passive_delay,
- int polling_delay)
+struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp)
{
- return thermal_zone_device_register_with_trips(type, NULL, ntrips, mask,
- devdata, ops, tzp,
- passive_delay, polling_delay);
+ return thermal_zone_device_register_with_trips(type, NULL, 0, 0, devdata,
+ ops, tzp, 0, 0);
}
-EXPORT_SYMBOL_GPL(thermal_zone_device_register);
+EXPORT_SYMBOL_GPL(thermal_tripless_zone_device_register);
void *thermal_zone_device_priv(struct thermal_zone_device *tzd)
{
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 34e423924e06..374e5aae4e7e 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -76,8 +76,7 @@ static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *j
int ret;
int data_len;
- if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en ||
- !(hba->capabilities & MASK_EHSLUTRD_SUPPORTED))
+ if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en)
return -EINVAL;
if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index e4318171381b..93417518c04d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -7240,11 +7240,17 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
/* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
+ /*
+ * According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, the host controller takes
+ * the EHS length from the CMD UPIU and the SW driver uses the EHS Length field in the CMD UPIU.
+ * If it is 1, the HW controller takes the EHS length from the UTRD.
+ */
+ if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
+ else
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
- /* update the task tag and LUN in the request upiu */
- req_upiu->header.flags = upiu_flags;
- req_upiu->header.lun = UFS_UPIU_RPMB_WLUN;
+ /* update the task tag */
req_upiu->header.task_tag = tag;
/* copy the UPIU(contains CDB) request as it is */
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 3282adc84d52..a25c9910d90b 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -31,7 +31,7 @@ config BTRFS_FS
continue to be mountable and usable by newer kernels.
For more information, please see the web pages at
- http://btrfs.wiki.kernel.org.
+ https://btrfs.readthedocs.io
To compile this file system support as a module, choose M here. The
module will be called btrfs.
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 0cb1dee965a0..b2e5107b7cec 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -3028,8 +3028,16 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
fail:
btrfs_release_path(path);
- /* We didn't update the block group item, need to revert @commit_used. */
- if (ret < 0) {
+ /*
+ * We didn't update the block group item, need to revert commit_used
+ * unless the block group item didn't exist yet - this is to prevent a
+ * race with a concurrent insertion of the block group item, with
+ * insert_block_group_item(), that happened just after we attempted to
+ * update. In that case we would reset commit_used to 0 just after the
+ * insertion set it to a value greater than 0 - if the block group later
+ * ends up with 0 used bytes, we would incorrectly skip its update.
+ */
+ if (ret < 0 && ret != -ENOENT) {
spin_lock(&cache->lock);
cache->commit_used = old_commit_used;
spin_unlock(&cache->lock);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 53c1211dd60b..caf0bbd028d1 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -412,6 +412,7 @@ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
+ struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
struct rb_root_cached *root;
struct btrfs_delayed_root *delayed_root;
@@ -419,18 +420,21 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
if (RB_EMPTY_NODE(&delayed_item->rb_node))
return;
- delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
+ /* If it's in a rbtree, then we need to have delayed node locked. */
+ lockdep_assert_held(&delayed_node->mutex);
+
+ delayed_root = delayed_node->root->fs_info->delayed_root;
BUG_ON(!delayed_root);
if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
- root = &delayed_item->delayed_node->ins_root;
+ root = &delayed_node->ins_root;
else
- root = &delayed_item->delayed_node->del_root;
+ root = &delayed_node->del_root;
rb_erase_cached(&delayed_item->rb_node, root);
RB_CLEAR_NODE(&delayed_item->rb_node);
- delayed_item->delayed_node->count--;
+ delayed_node->count--;
finish_one_item(delayed_root);
}
@@ -1153,20 +1157,33 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
ret = __btrfs_commit_inode_delayed_items(trans, path,
curr_node);
if (ret) {
- btrfs_release_delayed_node(curr_node);
- curr_node = NULL;
btrfs_abort_transaction(trans, ret);
break;
}
prev_node = curr_node;
curr_node = btrfs_next_delayed_node(curr_node);
+ /*
+ * See the comment below about releasing path before releasing
+ * node. If the commit of delayed items was successful the path
+ * should always be released, but in case of an error, it may
+ * point to locked extent buffers (a leaf at the very least).
+ */
+ ASSERT(path->nodes[0] == NULL);
btrfs_release_delayed_node(prev_node);
}
+ /*
+ * Release the path to avoid a potential deadlock and lockdep splat when
+ * releasing the delayed node, as that requires taking the delayed node's
+ * mutex. If another task starts running delayed items before we take
+ * the mutex, it will first lock the mutex and then it may try to lock
+ * the same btree path (leaf).
+ */
+ btrfs_free_path(path);
+
if (curr_node)
btrfs_release_delayed_node(curr_node);
- btrfs_free_path(path);
trans->block_rsv = block_rsv;
return ret;
@@ -1413,7 +1430,29 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
-/* Will return 0 or -ENOMEM */
+static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
+
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
+ return;
+
+ /*
+ * Adding the new dir index item does not require touching another
+ * leaf, so we can release 1 unit of metadata that was previously
+ * reserved when starting the transaction. This applies only to
+ * the case where we had a transaction start and excludes the
+ * transaction join case (when replaying log trees).
+ */
+ trace_btrfs_space_reservation(fs_info, "transaction",
+ trans->transid, bytes, 0);
+ btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
+ ASSERT(trans->bytes_reserved >= bytes);
+ trans->bytes_reserved -= bytes;
+}
+
+/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct btrfs_inode *dir,
@@ -1455,6 +1494,27 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&delayed_node->mutex);
+ /*
+ * First attempt to insert the delayed item. This is to make the error
+ * handling path simpler in case we fail (-EEXIST). There's no risk of
+ * any other task coming in and running the delayed item before we do
+ * the metadata space reservation below, because we are holding the
+ * delayed node's mutex and that mutex must also be locked before the
+ * node's delayed items can be run.
+ */
+ ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
+ if (unlikely(ret)) {
+ btrfs_err(trans->fs_info,
+"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
+ name_len, name, index, btrfs_root_id(delayed_node->root),
+ delayed_node->inode_id, dir->index_cnt,
+ delayed_node->index_cnt, ret);
+ btrfs_release_delayed_item(delayed_item);
+ btrfs_release_dir_index_item_space(trans);
+ mutex_unlock(&delayed_node->mutex);
+ goto release_node;
+ }
+
if (delayed_node->index_item_leaves == 0 ||
delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
delayed_node->curr_index_batch_size = data_len;
@@ -1472,36 +1532,14 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
* impossible.
*/
if (WARN_ON(ret)) {
- mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_item(delayed_item);
+ mutex_unlock(&delayed_node->mutex);
goto release_node;
}
delayed_node->index_item_leaves++;
- } else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
- const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
-
- /*
- * Adding the new dir index item does not require touching another
- * leaf, so we can release 1 unit of metadata that was previously
- * reserved when starting the transaction. This applies only to
- * the case where we had a transaction start and excludes the
- * transaction join case (when replaying log trees).
- */
- trace_btrfs_space_reservation(fs_info, "transaction",
- trans->transid, bytes, 0);
- btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
- ASSERT(trans->bytes_reserved >= bytes);
- trans->bytes_reserved -= bytes;
- }
-
- ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
- if (unlikely(ret)) {
- btrfs_err(trans->fs_info,
- "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- name_len, name, delayed_node->root->root_key.objectid,
- delayed_node->inode_id, ret);
- BUG();
+ } else {
+ btrfs_release_dir_index_item_space(trans);
}
mutex_unlock(&delayed_node->mutex);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 0a96ea8c1d3a..68f60d50e1fd 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -520,6 +520,7 @@ static bool btree_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
+ struct btrfs_subpage_info *spi = fs_info->subpage_info;
struct btrfs_subpage *subpage;
struct extent_buffer *eb;
int cur_bit = 0;
@@ -533,18 +534,19 @@ static bool btree_dirty_folio(struct address_space *mapping,
btrfs_assert_tree_write_locked(eb);
return filemap_dirty_folio(mapping, folio);
}
+
+ ASSERT(spi);
subpage = folio_get_private(folio);
- ASSERT(subpage->dirty_bitmap);
- while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
+ for (cur_bit = spi->dirty_offset;
+ cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
+ cur_bit++) {
unsigned long flags;
u64 cur;
- u16 tmp = (1 << cur_bit);
spin_lock_irqsave(&subpage->lock, flags);
- if (!(tmp & subpage->dirty_bitmap)) {
+ if (!test_bit(cur_bit, subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
- cur_bit++;
continue;
}
spin_unlock_irqrestore(&subpage->lock, flags);
@@ -557,7 +559,7 @@ static bool btree_dirty_folio(struct address_space *mapping,
btrfs_assert_tree_write_locked(eb);
free_extent_buffer(eb);
- cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
+ cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
}
return filemap_dirty_folio(mapping, folio);
}
@@ -1547,7 +1549,7 @@ static int transaction_kthread(void *arg)
delta = ktime_get_seconds() - cur->start_time;
if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
- cur->state < TRANS_STATE_COMMIT_START &&
+ cur->state < TRANS_STATE_COMMIT_PREP &&
delta < fs_info->commit_interval) {
spin_unlock(&fs_info->trans_lock);
delay -= msecs_to_jiffies((delta - 1) * 1000);
@@ -2682,8 +2684,8 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
- btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_start,
- BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
BTRFS_LOCKDEP_TRANS_UNBLOCKED);
btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
@@ -4870,7 +4872,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
while (!list_empty(&fs_info->trans_list)) {
t = list_first_entry(&fs_info->trans_list,
struct btrfs_transaction, list);
- if (t->state >= TRANS_STATE_COMMIT_START) {
+ if (t->state >= TRANS_STATE_COMMIT_PREP) {
refcount_inc(&t->use_count);
spin_unlock(&fs_info->trans_lock);
btrfs_wait_for_commit(fs_info, t->transid);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a18ee7b5a166..75ab766fe156 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1958,6 +1958,13 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
goto out_put;
}
+ /*
+ * We don't need the path anymore, so release it and
+ * avoid deadlocks and lockdep warnings in case
+ * btrfs_iget() needs to lookup the inode from its root
+ * btree and lock the same leaf.
+ */
+ btrfs_release_path(path);
temp_inode = btrfs_iget(sb, key2.objectid, root);
if (IS_ERR(temp_inode)) {
ret = PTR_ERR(temp_inode);
@@ -1978,7 +1985,6 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
goto out_put;
}
- btrfs_release_path(path);
key.objectid = key.offset;
key.offset = (u64)-1;
dirid = key.objectid;
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index edb9b4a0dba1..7d6ee1e609bf 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -79,7 +79,7 @@ enum btrfs_lock_nesting {
};
enum btrfs_lockdep_trans_states {
- BTRFS_LOCKDEP_TRANS_COMMIT_START,
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP,
BTRFS_LOCKDEP_TRANS_UNBLOCKED,
BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
BTRFS_LOCKDEP_TRANS_COMPLETED,
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b46ab348e8e5..345c449d588c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -639,7 +639,7 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
refcount_inc(&trans->use_count);
spin_unlock(&fs_info->trans_lock);
- ASSERT(trans);
+ ASSERT(trans || BTRFS_FS_ERROR(fs_info));
if (trans) {
if (atomic_dec_and_test(&trans->pending_ordered))
wake_up(&trans->pending_wait);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 874e4394df86..0bf42dccb041 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -56,12 +56,17 @@ static struct kmem_cache *btrfs_trans_handle_cachep;
* | Call btrfs_commit_transaction() on any trans handle attached to
* | transaction N
* V
- * Transaction N [[TRANS_STATE_COMMIT_START]]
+ * Transaction N [[TRANS_STATE_COMMIT_PREP]]
+ * |
+ * | If there are simultaneous calls to btrfs_commit_transaction() one will win
+ * | the race and the rest will wait for the winner to commit the transaction.
+ * |
+ * | The winner will wait for the previous running transaction to completely finish
+ * | if there is one.
* |
- * | Will wait for previous running transaction to completely finish if there
- * | is one
+ * Transaction N [[TRANS_STATE_COMMIT_START]]
* |
- * | Then one of the following happes:
+ * | Then one of the following happens:
* | - Wait for all other trans handle holders to release.
* | The btrfs_commit_transaction() caller will do the commit work.
* | - Wait for current transaction to be committed by others.
@@ -112,6 +117,7 @@ static struct kmem_cache *btrfs_trans_handle_cachep;
*/
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_RUNNING] = 0U,
+ [TRANS_STATE_COMMIT_PREP] = 0U,
[TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
[TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
__TRANS_ATTACH |
@@ -1982,7 +1988,7 @@ void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
* Wait for the current transaction commit to start and block
* subsequent transaction joins
*/
- btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
wait_event(fs_info->transaction_blocked_wait,
cur_trans->state >= TRANS_STATE_COMMIT_START ||
TRANS_ABORTED(cur_trans));
@@ -2129,7 +2135,7 @@ static void add_pending_snapshot(struct btrfs_trans_handle *trans)
return;
lockdep_assert_held(&trans->fs_info->trans_lock);
- ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
+ ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP);
list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
}
@@ -2153,7 +2159,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
ktime_t interval;
ASSERT(refcount_read(&trans->use_count) == 1);
- btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
@@ -2213,7 +2219,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
}
spin_lock(&fs_info->trans_lock);
- if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
+ if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
add_pending_snapshot(trans);
@@ -2225,7 +2231,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
want_state = TRANS_STATE_SUPER_COMMITTED;
btrfs_trans_state_lockdep_release(fs_info,
- BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
ret = btrfs_end_transaction(trans);
wait_for_commit(cur_trans, want_state);
@@ -2237,9 +2243,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
return ret;
}
- cur_trans->state = TRANS_STATE_COMMIT_START;
+ cur_trans->state = TRANS_STATE_COMMIT_PREP;
wake_up(&fs_info->transaction_blocked_wait);
- btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
if (cur_trans->list.prev != &fs_info->trans_list) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
@@ -2260,11 +2266,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
btrfs_put_transaction(prev_trans);
if (ret)
goto lockdep_release;
- } else {
- spin_unlock(&fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
}
} else {
- spin_unlock(&fs_info->trans_lock);
/*
* The previous transaction was aborted and was already removed
* from the list of transactions at fs_info->trans_list. So we
@@ -2272,11 +2276,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* corrupt state (pointing to trees with unwritten nodes/leafs).
*/
if (BTRFS_FS_ERROR(fs_info)) {
+ spin_unlock(&fs_info->trans_lock);
ret = -EROFS;
goto lockdep_release;
}
}
+ cur_trans->state = TRANS_STATE_COMMIT_START;
+ wake_up(&fs_info->transaction_blocked_wait);
+ spin_unlock(&fs_info->trans_lock);
+
/*
* Get the time spent on the work done by the commit thread and not
* the time spent waiting on a previous commit
@@ -2586,7 +2595,7 @@ lockdep_release:
goto cleanup_transaction;
lockdep_trans_commit_start_release:
- btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
btrfs_end_transaction(trans);
return ret;
}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 8e9fa23bd7fe..6b309f8a99a8 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -14,6 +14,7 @@
enum btrfs_trans_state {
TRANS_STATE_RUNNING,
+ TRANS_STATE_COMMIT_PREP,
TRANS_STATE_COMMIT_START,
TRANS_STATE_COMMIT_DOING,
TRANS_STATE_UNBLOCKED,
diff --git a/fs/nls/Kconfig b/fs/nls/Kconfig
index a0d0e2f7ec83..2a601af6f3bd 100644
--- a/fs/nls/Kconfig
+++ b/fs/nls/Kconfig
@@ -618,11 +618,6 @@ config NLS_UTF8
the Unicode/ISO9646 universal character set.
config NLS_UCS2_UTILS
- tristate "NLS UCS-2 UTILS"
- help
- Set of older UCS-2 conversion utilities and tables used by some
- filesystems including SMB/CIFS. This includes upper case conversion
- tables. This will automatically be selected when the filesystem
- that uses it is selected.
+ tristate
endif # NLS
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index bae404a1bad4..d1761ec5866a 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -618,7 +618,8 @@ static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp)
if (err)
return err;
- if (inode->i_flags & OVL_COPY_I_FLAGS_MASK) {
+ if (inode->i_flags & OVL_COPY_I_FLAGS_MASK &&
+ (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) {
/*
* Copy the fileattr inode flags that are the source of already
* copied i_flags
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 3b4cc633d763..4193633c4c7a 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -19,7 +19,6 @@ struct ovl_aio_req {
struct kiocb iocb;
refcount_t ref;
struct kiocb *orig_iocb;
- struct fd fd;
};
static struct kmem_cache *ovl_aio_request_cachep;
@@ -280,7 +279,7 @@ static rwf_t ovl_iocb_to_rwf(int ifl)
static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
{
if (refcount_dec_and_test(&aio_req->ref)) {
- fdput(aio_req->fd);
+ fput(aio_req->iocb.ki_filp);
kmem_cache_free(ovl_aio_request_cachep, aio_req);
}
}
@@ -342,10 +341,9 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (!aio_req)
goto out;
- aio_req->fd = real;
real.flags = 0;
aio_req->orig_iocb = iocb;
- kiocb_clone(&aio_req->iocb, iocb, real.file);
+ kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
refcount_set(&aio_req->ref, 2);
ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
@@ -409,10 +407,9 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
if (!aio_req)
goto out;
- aio_req->fd = real;
real.flags = 0;
aio_req->orig_iocb = iocb;
- kiocb_clone(&aio_req->iocb, iocb, real.file);
+ kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_flags = ifl;
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
refcount_set(&aio_req->ref, 2);
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index 2d5e9a9d5b8b..b17f067e4ada 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -18,7 +18,8 @@ static void smb2_close_cached_fid(struct kref *ref);
static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
const char *path,
- bool lookup_only)
+ bool lookup_only,
+ __u32 max_cached_dirs)
{
struct cached_fid *cfid;
@@ -43,7 +44,7 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
- if (cfids->num_entries >= MAX_CACHED_FIDS) {
+ if (cfids->num_entries >= max_cached_dirs) {
spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
@@ -145,7 +146,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
const char *npath;
if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
- is_smb1_server(tcon->ses->server))
+ is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
return -EOPNOTSUPP;
ses = tcon->ses;
@@ -162,7 +163,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
if (!utf16_path)
return -ENOMEM;
- cfid = find_or_create_cached_dir(cfids, path, lookup_only);
+ cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
if (cfid == NULL) {
kfree(utf16_path);
return -ENOENT;
@@ -582,7 +583,7 @@ cifs_cfids_laundromat_thread(void *p)
return 0;
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
- if (time_after(jiffies, cfid->time + HZ * 30)) {
+ if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
list_del(&cfid->entry);
list_add(&cfid->entry, &entry);
cfids->num_entries--;
diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
index facc9b154d00..a82ff2cea789 100644
--- a/fs/smb/client/cached_dir.h
+++ b/fs/smb/client/cached_dir.h
@@ -49,7 +49,7 @@ struct cached_fid {
struct cached_dirents dirents;
};
-#define MAX_CACHED_FIDS 16
+/* default MAX_CACHED_FIDS is 16 */
struct cached_fids {
/* Must be held when:
* - accessing the cfids->entries list
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 73c44e097a69..22869cda1356 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -117,6 +117,10 @@ module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
"CIFS/SMB1 dialect (N/A for SMB3) "
"Default: 32767 Range: 2 to 32767.");
+unsigned int dir_cache_timeout = 30;
+module_param(dir_cache_timeout, uint, 0644);
+MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
+ "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
@@ -695,6 +699,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
if (tcon->handle_timeout)
seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
+ if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
+ seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
/*
* Display file and directory attribute timeout in seconds.
@@ -1679,6 +1685,12 @@ init_cifs(void)
CIFS_MAX_REQ);
}
+ /* Limit max to about 18 hours, and setting to zero disables directory entry caching */
+ if (dir_cache_timeout > 65000) {
+ dir_cache_timeout = 65000;
+ cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
+ }
+
cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
if (!cifsiod_wq) {
rc = -ENOMEM;
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 532c38fe07cd..41daebd220ff 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -152,6 +152,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 44
-#define CIFS_VERSION "2.44"
+#define SMB3_PRODUCT_BUILD 45
+#define CIFS_VERSION "2.45"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 259e231f8b4f..032d8716f671 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -1210,6 +1210,7 @@ struct cifs_tcon {
__u32 max_chunks;
__u32 max_bytes_chunk;
__u32 max_bytes_copy;
+ __u32 max_cached_dirs;
#ifdef CONFIG_CIFS_FSCACHE
u64 resource_id; /* server resource id */
struct fscache_volume *fscache; /* cookie for share */
@@ -2016,6 +2017,7 @@ extern unsigned int CIFSMaxBufSize; /* max size not including hdr */
extern unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
extern unsigned int cifs_min_small; /* min size of small buf pool */
extern unsigned int cifs_max_pending; /* MAX requests at once to server*/
+extern unsigned int dir_cache_timeout; /* max time for directory lease caching of dir */
extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */
extern atomic_t mid_count;
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 3bd71f982170..687754791bf0 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -2657,6 +2657,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
tcon->retry = ctx->retry;
tcon->nocase = ctx->nocase;
tcon->broken_sparse_sup = ctx->no_sparse;
+ tcon->max_cached_dirs = ctx->max_cached_dirs;
if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
tcon->nohandlecache = ctx->nohandlecache;
else
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 67e16c2ac90e..e45ce31bbda7 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -150,6 +150,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_u32("closetimeo", Opt_closetimeo),
fsparam_u32("echo_interval", Opt_echo_interval),
fsparam_u32("max_credits", Opt_max_credits),
+ fsparam_u32("max_cached_dirs", Opt_max_cached_dirs),
fsparam_u32("handletimeout", Opt_handletimeout),
fsparam_u64("snapshot", Opt_snapshot),
fsparam_u32("max_channels", Opt_max_channels),
@@ -1165,6 +1166,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
if (result.uint_32 > 1)
ctx->multichannel = true;
break;
+ case Opt_max_cached_dirs:
+ if (result.uint_32 < 1) {
+ cifs_errorf(fc, "%s: Invalid max_cached_dirs, needs to be 1 or more\n",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ ctx->max_cached_dirs = result.uint_32;
+ break;
case Opt_handletimeout:
ctx->handle_timeout = result.uint_32;
if (ctx->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
@@ -1592,7 +1601,7 @@ int smb3_init_fs_context(struct fs_context *fc)
ctx->acregmax = CIFS_DEF_ACTIMEO;
ctx->acdirmax = CIFS_DEF_ACTIMEO;
ctx->closetimeo = SMB3_DEF_DCLOSETIMEO;
-
+ ctx->max_cached_dirs = MAX_CACHED_FIDS;
/* Most clients set timeout to 0, allows server to use its default */
ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index f4eaf8558902..9d8d34af0211 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -128,6 +128,7 @@ enum cifs_param {
Opt_closetimeo,
Opt_echo_interval,
Opt_max_credits,
+ Opt_max_cached_dirs,
Opt_snapshot,
Opt_max_channels,
Opt_handletimeout,
@@ -261,6 +262,7 @@ struct smb3_fs_context {
__u32 handle_timeout; /* persistent and durable handle timeout in ms */
unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
unsigned int max_channels;
+ unsigned int max_cached_dirs;
__u16 compression; /* compression algorithm 0xFFFF default 0=disabled */
bool rootfs:1; /* if it's a SMB root file system */
bool witness:1; /* use witness protocol */
@@ -287,7 +289,7 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
*/
#define SMB3_MAX_DCLOSETIMEO (1 << 30)
#define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
-
+#define MAX_CACHED_FIDS 16
extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
#endif
diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
index 3677525ee993..e5cad149f5a2 100644
--- a/fs/smb/client/fscache.c
+++ b/fs/smb/client/fscache.c
@@ -48,7 +48,7 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
sharename = extract_sharename(tcon->tree_name);
if (IS_ERR(sharename)) {
cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
- return -EINVAL;
+ return PTR_ERR(sharename);
}
slen = strlen(sharename);
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index e3dd698854d6..d9eda2e958b4 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -2683,6 +2683,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
smb2_copy_fs_info_to_kstatfs(info, buf);
qfs_exit:
+ trace_smb3_qfs_done(xid, tcon->tid, tcon->ses->Suid, tcon->tree_name, rc);
free_rsp_buf(buftype, rsp_iov.iov_base);
return rc;
}
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index e671bd16f00c..a7e4755bed0f 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -691,7 +691,7 @@ DEFINE_EVENT(smb3_tcon_class, smb3_##name, \
TP_ARGS(xid, tid, sesid, unc_name, rc))
DEFINE_SMB3_TCON_EVENT(tcon);
-
+DEFINE_SMB3_TCON_EVENT(qfs_done);
/*
* For smb2/smb3 open (including create and mkdir) calls
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 2680251b9aac..319fb9ffc6a0 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -406,7 +406,7 @@ struct smb2_tree_disconnect_rsp {
/* Capabilities flags */
#define SMB2_GLOBAL_CAP_DFS 0x00000001
#define SMB2_GLOBAL_CAP_LEASING 0x00000002 /* Resp only New to SMB2.1 */
-#define SMB2_GLOBAL_CAP_LARGE_MTU 0X00000004 /* Resp only New to SMB2.1 */
+#define SMB2_GLOBAL_CAP_LARGE_MTU 0x00000004 /* Resp only New to SMB2.1 */
#define SMB2_GLOBAL_CAP_MULTI_CHANNEL 0x00000008 /* New to SMB3 */
#define SMB2_GLOBAL_CAP_PERSISTENT_HANDLES 0x00000010 /* New to SMB3 */
#define SMB2_GLOBAL_CAP_DIRECTORY_LEASING 0x00000020 /* New to SMB3 */
diff --git a/fs/smb/server/Kconfig b/fs/smb/server/Kconfig
index 793151ddd60e..cabe6a843c6a 100644
--- a/fs/smb/server/Kconfig
+++ b/fs/smb/server/Kconfig
@@ -1,5 +1,5 @@
config SMB_SERVER
- tristate "SMB3 server support (EXPERIMENTAL)"
+ tristate "SMB3 server support"
depends on INET
depends on MULTIUSER
depends on FILE_LOCKING
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index 801cd0929209..5ab2f52f9b35 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -590,8 +590,6 @@ static int __init ksmbd_server_init(void)
if (ret)
goto err_crypto_destroy;
- pr_warn_once("The ksmbd server is experimental\n");
-
return 0;
err_crypto_destroy:
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index 237c6f370ad9..9f64e7332796 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -185,17 +185,49 @@ static struct dentry *create_dir(const char *name, struct dentry *parent, void *
/**
* eventfs_set_ef_status_free - set the ef->status to free
+ * @ti: the tracefs_inode of the dentry
* @dentry: dentry who's status to be freed
*
* eventfs_set_ef_status_free will be called if no more
* references remain
*/
-void eventfs_set_ef_status_free(struct dentry *dentry)
+void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry)
{
struct tracefs_inode *ti_parent;
- struct eventfs_file *ef;
+ struct eventfs_inode *ei;
+ struct eventfs_file *ef, *tmp;
+
+ /* The top level events directory may be freed by this */
+ if (unlikely(ti->flags & TRACEFS_EVENT_TOP_INODE)) {
+ LIST_HEAD(ef_del_list);
+
+ mutex_lock(&eventfs_mutex);
+
+ ei = ti->private;
+
+ /* Record all the top level files */
+ list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+ lockdep_is_held(&eventfs_mutex)) {
+ list_add_tail(&ef->del_list, &ef_del_list);
+ }
+
+ /* Nothing should access this, but just in case! */
+ ti->private = NULL;
+
+ mutex_unlock(&eventfs_mutex);
+
+ /* Now safely free the top level files and their children */
+ list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
+ list_del(&ef->del_list);
+ eventfs_remove(ef);
+ }
+
+ kfree(ei);
+ return;
+ }
mutex_lock(&eventfs_mutex);
+
ti_parent = get_tracefs(dentry->d_parent->d_inode);
if (!ti_parent || !(ti_parent->flags & TRACEFS_EVENT_INODE))
goto out;
@@ -420,7 +452,8 @@ static int dcache_dir_open_wrapper(struct inode *inode, struct file *file)
ei = ti->private;
idx = srcu_read_lock(&eventfs_srcu);
- list_for_each_entry_rcu(ef, &ei->e_top_files, list) {
+ list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+ srcu_read_lock_held(&eventfs_srcu)) {
create_dentry(ef, dentry, false);
}
srcu_read_unlock(&eventfs_srcu, idx);
@@ -491,6 +524,9 @@ struct dentry *eventfs_create_events_dir(const char *name,
struct tracefs_inode *ti;
struct inode *inode;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (IS_ERR(dentry))
return dentry;
@@ -507,7 +543,7 @@ struct dentry *eventfs_create_events_dir(const char *name,
INIT_LIST_HEAD(&ei->e_top_files);
ti = get_tracefs(inode);
- ti->flags |= TRACEFS_EVENT_INODE;
+ ti->flags |= TRACEFS_EVENT_INODE | TRACEFS_EVENT_TOP_INODE;
ti->private = ei;
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
@@ -538,6 +574,9 @@ struct eventfs_file *eventfs_add_subsystem_dir(const char *name,
struct eventfs_inode *ei_parent;
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (!parent)
return ERR_PTR(-EINVAL);
@@ -569,6 +608,9 @@ struct eventfs_file *eventfs_add_dir(const char *name,
{
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (!ef_parent)
return ERR_PTR(-EINVAL);
@@ -606,6 +648,9 @@ int eventfs_add_events_file(const char *name, umode_t mode,
struct eventfs_inode *ei;
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return -ENODEV;
+
if (!parent)
return -EINVAL;
@@ -654,6 +699,9 @@ int eventfs_add_file(const char *name, umode_t mode,
{
struct eventfs_file *ef;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return -ENODEV;
+
if (!ef_parent)
return -EINVAL;
@@ -791,7 +839,6 @@ void eventfs_remove(struct eventfs_file *ef)
void eventfs_remove_events_dir(struct dentry *dentry)
{
struct tracefs_inode *ti;
- struct eventfs_inode *ei;
if (!dentry || !dentry->d_inode)
return;
@@ -800,8 +847,6 @@ void eventfs_remove_events_dir(struct dentry *dentry)
if (!ti || !(ti->flags & TRACEFS_EVENT_INODE))
return;
- ei = ti->private;
d_invalidate(dentry);
dput(dentry);
- kfree(ei);
}
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index de5b72216b1a..891653ba9cf3 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -385,7 +385,7 @@ static void tracefs_dentry_iput(struct dentry *dentry, struct inode *inode)
ti = get_tracefs(inode);
if (ti && ti->flags & TRACEFS_EVENT_INODE)
- eventfs_set_ef_status_free(dentry);
+ eventfs_set_ef_status_free(ti, dentry);
iput(inode);
}
@@ -673,6 +673,9 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
*/
struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
{
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
return __create_dir(name, parent, &simple_dir_inode_operations);
}
diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h
index 69c2b1d87c46..4f2e49e2197b 100644
--- a/fs/tracefs/internal.h
+++ b/fs/tracefs/internal.h
@@ -3,7 +3,8 @@
#define _TRACEFS_INTERNAL_H
enum {
- TRACEFS_EVENT_INODE = BIT(1),
+ TRACEFS_EVENT_INODE = BIT(1),
+ TRACEFS_EVENT_TOP_INODE = BIT(2),
};
struct tracefs_inode {
@@ -24,6 +25,6 @@ struct inode *tracefs_get_inode(struct super_block *sb);
struct dentry *eventfs_start_creating(const char *name, struct dentry *parent);
struct dentry *eventfs_failed_creating(struct dentry *dentry);
struct dentry *eventfs_end_creating(struct dentry *dentry);
-void eventfs_set_ef_status_free(struct dentry *dentry);
+void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry);
#endif /* _TRACEFS_INTERNAL_H */
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
index 1c849db953a5..45fca09b2319 100644
--- a/include/linux/export-internal.h
+++ b/include/linux/export-internal.h
@@ -52,6 +52,8 @@
#ifdef CONFIG_IA64
#define KSYM_FUNC(name) @fptr(name)
+#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+#define KSYM_FUNC(name) P%name
#else
#define KSYM_FUNC(name) name
#endif
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 819b6bc8ac08..3df5499f7936 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,11 +54,13 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
+#ifndef __HAVE_ARCH_SHADOW_MAP
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET;
}
+#endif
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
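For context on the new guard above: an architecture that defines __HAVE_ARCH_SHADOW_MAP in its own asm/kasan.h suppresses the generic linear shadow translation and must supply its own kasan_mem_to_shadow(). The sketch below only illustrates the shape of such an override; arch_shadow_base() is a hypothetical helper, not the actual LoongArch implementation.

/* In the architecture's asm/kasan.h (illustrative sketch only). */
#define __HAVE_ARCH_SHADOW_MAP

static inline void *kasan_mem_to_shadow(const void *addr)
{
	unsigned long maddr = (unsigned long)addr;

	/*
	 * arch_shadow_base() is hypothetical: pick a shadow base per region
	 * instead of the single KASAN_SHADOW_OFFSET used by the generic code.
	 */
	return (void *)(arch_shadow_base(maddr) +
			(maddr >> KASAN_SHADOW_SCALE_SHIFT));
}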
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 4109f1bd6128..f6ef8cf5d774 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -53,10 +53,10 @@
struct nvmefc_ls_req {
void *rqstaddr;
dma_addr_t rqstdma;
- u32 rqstlen;
+ __le32 rqstlen;
void *rspaddr;
dma_addr_t rspdma;
- u32 rsplen;
+ __le32 rsplen;
u32 timeout;
void *private;
@@ -120,7 +120,7 @@ struct nvmefc_ls_req {
struct nvmefc_ls_rsp {
void *rspbuf;
dma_addr_t rspdma;
- u16 rsplen;
+ __le32 rsplen;
void (*done)(struct nvmefc_ls_rsp *rsp);
void *nvme_fc_private; /* LLDD is not to access !! */
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 0f4a8903922a..f86a08ba0207 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -67,6 +67,7 @@ enum OID {
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */
+ OID_negoex, /* 1.3.6.1.4.1.311.2.2.30 */
OID_spnego, /* 1.3.6.1.5.5.2 */
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index f29aaaf2eb21..006e18decfad 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -108,6 +108,8 @@ extern const struct raid6_calls raid6_vpermxor1;
extern const struct raid6_calls raid6_vpermxor2;
extern const struct raid6_calls raid6_vpermxor4;
extern const struct raid6_calls raid6_vpermxor8;
+extern const struct raid6_calls raid6_lsx;
+extern const struct raid6_calls raid6_lasx;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -123,6 +125,8 @@ extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_avx512;
extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_recov_calls raid6_recov_neon;
+extern const struct raid6_recov_calls raid6_recov_lsx;
+extern const struct raid6_recov_calls raid6_recov_lasx;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index eb17495c8acc..c99440aac1a1 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -300,16 +300,22 @@ int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
#endif
#ifdef CONFIG_THERMAL
-struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
- void *, struct thermal_zone_device_ops *,
- const struct thermal_zone_params *, int, int);
-
-void thermal_zone_device_unregister(struct thermal_zone_device *);
-
-struct thermal_zone_device *
-thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int,
- void *, struct thermal_zone_device_ops *,
- const struct thermal_zone_params *, int, int);
+struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ struct thermal_trip *trips,
+ int num_trips, int mask,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay);
+
+struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp);
+
+void thermal_zone_device_unregister(struct thermal_zone_device *tz);
void *thermal_zone_device_priv(struct thermal_zone_device *tzd);
const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
@@ -350,15 +356,26 @@ int thermal_zone_device_enable(struct thermal_zone_device *tz);
int thermal_zone_device_disable(struct thermal_zone_device *tz);
void thermal_zone_device_critical(struct thermal_zone_device *tz);
#else
-static inline struct thermal_zone_device *thermal_zone_device_register(
- const char *type, int trips, int mask, void *devdata,
- struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay)
+static inline struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ struct thermal_trip *trips,
+ int num_trips, int mask,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay)
+{ return ERR_PTR(-ENODEV); }
+
+static inline struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp)
{ return ERR_PTR(-ENODEV); }
-static inline void thermal_zone_device_unregister(
- struct thermal_zone_device *tz)
+
+static inline void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{ }
+
static inline struct thermal_cooling_device *
thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
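The hunk above introduces thermal_tripless_zone_device_register() for sensors that expose a temperature but define no trip points. A minimal, illustrative caller might look like the sketch below; the "example_" names are hypothetical, and a real driver would typically also call thermal_zone_device_enable() on the returned zone.

#include <linux/err.h>
#include <linux/thermal.h>

static int example_get_temp(struct thermal_zone_device *tz, int *temp)
{
	*temp = 45000;		/* placeholder reading, millidegrees Celsius */
	return 0;
}

static struct thermal_zone_device_ops example_ops = {
	.get_temp = example_get_temp,
};

static int example_register(void *devdata)
{
	struct thermal_zone_device *tzd;

	/* No trips and no trip mask: use the new tripless registration. */
	tzd = thermal_tripless_zone_device_register("example_sensor",
						    devdata, &example_ops,
						    NULL);
	return IS_ERR(tzd) ? PTR_ERR(tzd) : 0;
}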
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index eb5c3add939b..21ae37e49319 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -62,13 +62,13 @@ void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
- u16 offset;
u16 len;
+ u16 offset;
#else
- u16 len;
u16 offset;
+ u16 len;
#endif
-};
+} __packed;
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -650,7 +650,6 @@ struct trace_event_file {
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
struct eventfs_file *ef;
- struct dentry *dir;
struct trace_array *tr;
struct trace_subsystem_dir *system;
struct list_head triggers;
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 741703b45f61..cb571dfcf4b1 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -856,6 +856,9 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -886,6 +889,9 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -916,6 +922,9 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -949,6 +958,9 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -983,6 +995,9 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -1017,6 +1032,9 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
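The documentation added above stresses that the xa_alloc*() family may only be used on an XArray created with XA_FLAGS_ALLOC. A minimal sketch of compliant usage follows; the "example_" names are illustrative.

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_xa);	/* implies XA_FLAGS_ALLOC */

static int example_store(void *item)
{
	u32 id;

	/* Finds a free index in [0, UINT_MAX], stores item, returns the index in id. */
	return xa_alloc(&example_xa, &id, item, xa_limit_32b, GFP_KERNEL);
}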
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 0675be0f3fa0..c6932d1a3fa8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -784,6 +784,11 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
cpu_to_be32(0x0000ffff))) == 0UL;
}
+static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
+{
+ return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
+}
+
static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
{
return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
@@ -1360,7 +1365,7 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
return 0;
}
-static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
{
int ret;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a2b8d30c4c80..49f768d0ff37 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -764,7 +764,7 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct device *);
+extern void scsi_rescan_device(struct scsi_device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index c9a8bce9a785..d70c55f17df7 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -142,7 +142,7 @@ struct snd_dmaengine_pcm_config {
struct snd_pcm_substream *substream);
int (*process)(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes);
+ unsigned long bytes);
dma_filter_fn compat_filter_fn;
struct device *dma_dev;
const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 17bea3144551..ceca69b46a82 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -139,7 +139,7 @@ struct snd_soc_component_driver {
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
int (*copy)(struct snd_soc_component *component,
struct snd_pcm_substream *substream, int channel,
- unsigned long pos, struct iov_iter *buf,
+ unsigned long pos, struct iov_iter *iter,
unsigned long bytes);
struct page *(*page)(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
@@ -511,7 +511,7 @@ int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream,
int channel, unsigned long pos,
- struct iov_iter *buf, unsigned long bytes);
+ struct iov_iter *iter, unsigned long bytes);
struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
unsigned long offset);
int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index ee0bcff14b69..9b731976ce2f 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -445,6 +445,8 @@ typedef struct elf64_shdr {
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
+#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
+#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index 7c7975f9905e..03f2beadf201 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -83,7 +83,7 @@ struct utp_upiu_header {
union {
__u8 tm_function;
__u8 query_function;
- };
+ } __attribute__((packed));
__u8 response;
__u8 status;
__u8 ehs_length;
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index 300455b4bc12..c53678875416 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -93,6 +93,8 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
struct io_uring_sqe *sqe;
unsigned int sq_idx;
+ if (ctx->flags & IORING_SETUP_NO_SQARRAY)
+ break;
sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
if (sq_idx > sq_mask)
continue;
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 62f345587df5..1ecc8c748768 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -174,6 +174,16 @@ static void io_worker_ref_put(struct io_wq *wq)
complete(&wq->worker_done);
}
+bool io_wq_worker_stopped(void)
+{
+ struct io_worker *worker = current->worker_private;
+
+ if (WARN_ON_ONCE(!io_wq_current_is_worker()))
+ return true;
+
+ return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
+}
+
static void io_worker_cancel_cb(struct io_worker *worker)
{
struct io_wq_acct *acct = io_wq_get_acct(worker);
diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
index 06d9ca90c577..2b2a6406dd8e 100644
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -52,6 +52,7 @@ void io_wq_hash_work(struct io_wq_work *work, void *val);
int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
+bool io_wq_worker_stopped(void);
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index e7675355048d..783ed0fff71b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -150,6 +150,31 @@ static void io_queue_sqe(struct io_kiocb *req);
struct kmem_cache *req_cachep;
+static int __read_mostly sysctl_io_uring_disabled;
+static int __read_mostly sysctl_io_uring_group = -1;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table kernel_io_uring_disabled_table[] = {
+ {
+ .procname = "io_uring_disabled",
+ .data = &sysctl_io_uring_disabled,
+ .maxlen = sizeof(sysctl_io_uring_disabled),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ {
+ .procname = "io_uring_group",
+ .data = &sysctl_io_uring_group,
+ .maxlen = sizeof(gid_t),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {},
+};
+#endif
+
struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
@@ -883,7 +908,7 @@ static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
struct io_uring_cqe *cqe = &ctx->completion_cqes[i];
if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) {
- if (ctx->task_complete) {
+ if (ctx->lockless_cq) {
spin_lock(&ctx->completion_lock);
io_cqring_event_overflow(ctx, cqe->user_data,
cqe->res, cqe->flags, 0, 0);
@@ -1541,7 +1566,7 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
if (!(req->flags & REQ_F_CQE_SKIP) &&
unlikely(!io_fill_cqe_req(ctx, req))) {
- if (ctx->task_complete) {
+ if (ctx->lockless_cq) {
spin_lock(&ctx->completion_lock);
io_req_cqe_overflow(req);
spin_unlock(&ctx->completion_lock);
@@ -1950,6 +1975,8 @@ fail:
if (!needs_poll) {
if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
break;
+ if (io_wq_worker_stopped())
+ break;
cond_resched();
continue;
}
@@ -4038,9 +4065,30 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
return io_uring_create(entries, &p, params);
}
+static inline bool io_uring_allowed(void)
+{
+ int disabled = READ_ONCE(sysctl_io_uring_disabled);
+ kgid_t io_uring_group;
+
+ if (disabled == 2)
+ return false;
+
+ if (disabled == 0 || capable(CAP_SYS_ADMIN))
+ return true;
+
+ io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
+ if (!gid_valid(io_uring_group))
+ return false;
+
+ return in_group_p(io_uring_group);
+}
+
SYSCALL_DEFINE2(io_uring_setup, u32, entries,
struct io_uring_params __user *, params)
{
+ if (!io_uring_allowed())
+ return -EPERM;
+
return io_uring_setup(entries, params);
}
@@ -4634,6 +4682,10 @@ static int __init io_uring_init(void)
offsetof(struct io_kiocb, cmd.data),
sizeof_field(struct io_kiocb, cmd.data), NULL);
+#ifdef CONFIG_SYSCTL
+ register_sysctl_init("kernel", kernel_io_uring_disabled_table);
+#endif
+
return 0;
};
__initcall(io_uring_init);
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index ee2d2c687fda..bd6c2c7959a5 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -430,7 +430,9 @@ __cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
if (sqd) {
io_sq_thread_park(sqd);
- ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
+ /* Don't set affinity for a dying thread */
+ if (sqd->thread)
+ ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
io_sq_thread_unpark(sqd);
}
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 0f8f036d8bd1..4e3ce0542e31 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -870,7 +870,7 @@ static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_ins
GFP_KERNEL);
if (!pack)
return NULL;
- pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
+ pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
if (!pack->ptr) {
kfree(pack);
return NULL;
@@ -894,7 +894,7 @@ void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
mutex_lock(&pack_mutex);
if (size > BPF_PROG_PACK_SIZE) {
size = round_up(size, PAGE_SIZE);
- ptr = module_alloc(size);
+ ptr = bpf_jit_alloc_exec(size);
if (ptr) {
bpf_fill_ill_insns(ptr, size);
set_vm_flush_reset_perms(ptr);
@@ -932,7 +932,7 @@ void bpf_prog_pack_free(struct bpf_binary_header *hdr)
mutex_lock(&pack_mutex);
if (hdr->size > BPF_PROG_PACK_SIZE) {
- module_memfree(hdr);
+ bpf_jit_free_exec(hdr);
goto out;
}
@@ -956,7 +956,7 @@ void bpf_prog_pack_free(struct bpf_binary_header *hdr)
if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
BPF_PROG_CHUNK_COUNT, 0) == 0) {
list_del(&pack->list);
- module_memfree(pack->ptr);
+ bpf_jit_free_exec(pack->ptr);
kfree(pack);
}
out:
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 4c1e9a3c0ab6..f488997b0717 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -160,7 +160,7 @@ if DMA_CMA
config DMA_NUMA_CMA
bool "Enable separate DMA Contiguous Memory Area for NUMA Node"
- default NUMA
+ depends on NUMA
help
Enable this option to get numa CMA areas so that NUMA devices
can get local memory by DMA coherent APIs.
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 88c595e49e34..f005c66f378c 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -473,11 +473,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
return -EBUSY;
}
- if (memblock_is_region_reserved(rmem->base, rmem->size)) {
- pr_info("Reserved memory: overlap with other memblock reserved region\n");
- return -EBUSY;
- }
-
if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
of_get_flat_dt_prop(node, "no-map", NULL))
return -EINVAL;
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index f190651bcadd..06366acd27b0 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -637,15 +637,19 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
return entry;
}
-static void __dma_entry_alloc_check_leak(void)
+/*
+ * This should be called outside of free_entries_lock scope to avoid potential
+ * deadlocks with serial consoles that use DMA.
+ */
+static void __dma_entry_alloc_check_leak(u32 nr_entries)
{
- u32 tmp = nr_total_entries % nr_prealloc_entries;
+ u32 tmp = nr_entries % nr_prealloc_entries;
/* Shout each time we tick over some multiple of the initial pool */
if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
- nr_total_entries,
- (nr_total_entries / nr_prealloc_entries));
+ nr_entries,
+ (nr_entries / nr_prealloc_entries));
}
}
@@ -656,8 +660,10 @@ static void __dma_entry_alloc_check_leak(void)
*/
static struct dma_debug_entry *dma_entry_alloc(void)
{
+ bool alloc_check_leak = false;
struct dma_debug_entry *entry;
unsigned long flags;
+ u32 nr_entries;
spin_lock_irqsave(&free_entries_lock, flags);
if (num_free_entries == 0) {
@@ -667,13 +673,17 @@ static struct dma_debug_entry *dma_entry_alloc(void)
pr_err("debugging out of memory - disabling\n");
return NULL;
}
- __dma_entry_alloc_check_leak();
+ alloc_check_leak = true;
+ nr_entries = nr_total_entries;
}
entry = __dma_entry_alloc();
spin_unlock_irqrestore(&free_entries_lock, flags);
+ if (alloc_check_leak)
+ __dma_entry_alloc_check_leak(nr_entries);
+
#ifdef CONFIG_STACKTRACE
entry->stack_len = stack_trace_save(entry->stack_entries,
ARRAY_SIZE(entry->stack_entries),
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 1acec2e22827..b481c48a31a6 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -135,9 +135,9 @@ encrypt_mapping:
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
dma_common_free_remap(addr, pool_size);
-#endif
-free_page: __maybe_unused
+free_page:
__free_pages(page, order);
+#endif
out:
return ret;
}
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 96fc38cb2e84..7e0b4dd02398 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -538,14 +538,12 @@ char *log_buf_addr_get(void)
{
return log_buf;
}
-EXPORT_SYMBOL_GPL(log_buf_addr_get);
/* Return log buffer size */
u32 log_buf_len_get(void)
{
return log_buf_len;
}
-EXPORT_SYMBOL_GPL(log_buf_len_get);
/*
* Define how much of the log buffer we could take at maximum. The value
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 78502d4c7214..a1651edc48d5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2198,6 +2198,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
err = -ENOMEM;
goto out_err;
}
+
+ cond_resched();
}
cpus_read_lock();
@@ -2388,6 +2390,11 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
*/
commit = rb_page_commit(iter_head_page);
smp_rmb();
+
+ /* An event needs to be at least 8 bytes in size */
+ if (iter->head > commit - 8)
+ goto reset;
+
event = __rb_page_index(iter_head_page, iter->head);
length = rb_event_length(event);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2b4ded753367..abaaf516fcae 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1772,7 +1772,7 @@ static void trace_create_maxlat_file(struct trace_array *tr,
init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
tr->d_max_latency = trace_create_file("tracing_max_latency",
TRACE_MODE_WRITE,
- d_tracer, &tr->max_latency,
+ d_tracer, tr,
&tracing_max_lat_fops);
}
@@ -1805,7 +1805,7 @@ void latency_fsnotify(struct trace_array *tr)
#define trace_create_maxlat_file(tr, d_tracer) \
trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
- d_tracer, &tr->max_latency, &tracing_max_lat_fops)
+ d_tracer, tr, &tracing_max_lat_fops)
#endif
@@ -4973,6 +4973,33 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp)
return 0;
}
+/*
+ * The private pointer of the inode is the trace_event_file.
+ * Update the tr ref count associated to it.
+ */
+int tracing_open_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(file->tr);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+
+ return 0;
+}
+
+int tracing_release_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+
+ trace_array_put(file->tr);
+
+ return 0;
+}
+
static int tracing_mark_open(struct inode *inode, struct file *filp)
{
stream_open(inode, filp);
@@ -6691,14 +6718,18 @@ static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
+ struct trace_array *tr = filp->private_data;
+
+ return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
}
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
+ struct trace_array *tr = filp->private_data;
+
+ return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
}
#endif
@@ -7752,18 +7783,20 @@ static const struct file_operations tracing_thresh_fops = {
#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_max_lat_read,
.write = tracing_max_lat_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
#endif
static const struct file_operations set_tracer_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
static const struct file_operations tracing_pipe_fops = {
@@ -8956,12 +8989,33 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
return cnt;
}
+static int tracing_open_options(struct inode *inode, struct file *filp)
+{
+ struct trace_option_dentry *topt = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(topt->tr);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static int tracing_release_options(struct inode *inode, struct file *file)
+{
+ struct trace_option_dentry *topt = file->private_data;
+
+ trace_array_put(topt->tr);
+ return 0;
+}
static const struct file_operations trace_options_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_options,
.read = trace_options_read,
.write = trace_options_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_options,
};
/*
@@ -9739,8 +9793,8 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
tr, &tracing_mark_fops);
file = __find_event_file(tr, "ftrace", "print");
- if (file && file->dir)
- trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
+ if (file && file->ef)
+ eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
file, &event_trigger_fops);
tr->trace_marker_file = file;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5669dd1f90d9..77debe53f07c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -610,6 +610,8 @@ void tracing_reset_all_online_cpus(void);
void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+int tracing_open_file_tr(struct inode *inode, struct file *filp);
+int tracing_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index ed367d713be0..91951d038ba4 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -992,19 +992,6 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
static void remove_event_file_dir(struct trace_event_file *file)
{
- struct dentry *dir = file->dir;
- struct dentry *child;
-
- if (dir) {
- spin_lock(&dir->d_lock); /* probably unneeded */
- list_for_each_entry(child, &dir->d_subdirs, d_child) {
- if (d_really_is_positive(child)) /* probably unneeded */
- d_inode(child)->i_private = NULL;
- }
- spin_unlock(&dir->d_lock);
-
- tracefs_remove(dir);
- }
eventfs_remove(file->ef);
list_del(&file->list);
remove_subsystem(file->system);
@@ -2103,9 +2090,10 @@ static const struct file_operations ftrace_set_event_notrace_pid_fops = {
};
static const struct file_operations ftrace_enable_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_enable_read,
.write = event_enable_write,
+ .release = tracing_release_file_tr,
.llseek = default_llseek,
};
@@ -2122,9 +2110,10 @@ static const struct file_operations ftrace_event_id_fops = {
};
static const struct file_operations ftrace_event_filter_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_filter_read,
.write = event_filter_write,
+ .release = tracing_release_file_tr,
.llseek = default_llseek,
};
@@ -2297,6 +2286,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
{
struct event_subsystem *system, *iter;
struct trace_subsystem_dir *dir;
+ struct eventfs_file *ef;
int res;
/* First see if we did not already create this dir */
@@ -2329,13 +2319,14 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
} else
__get_system(system);
- dir->ef = eventfs_add_subsystem_dir(name, parent);
- if (IS_ERR(dir->ef)) {
+ ef = eventfs_add_subsystem_dir(name, parent);
+ if (IS_ERR(ef)) {
pr_warn("Failed to create system directory %s\n", name);
__put_system(system);
goto out_free;
}
+ dir->ef = ef;
dir->tr = tr;
dir->ref_count = 1;
dir->nr_events = 1;
@@ -2415,6 +2406,7 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
struct trace_event_call *call = file->event_call;
struct eventfs_file *ef_subsystem = NULL;
struct trace_array *tr = file->tr;
+ struct eventfs_file *ef;
const char *name;
int ret;
@@ -2431,12 +2423,14 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
return -ENOMEM;
name = trace_event_name(call);
- file->ef = eventfs_add_dir(name, ef_subsystem);
- if (IS_ERR(file->ef)) {
+ ef = eventfs_add_dir(name, ef_subsystem);
+ if (IS_ERR(ef)) {
pr_warn("Could not create tracefs '%s' directory\n", name);
return -1;
}
+ file->ef = ef;
+
if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
eventfs_add_file("enable", TRACE_MODE_WRITE, file->ef, file,
&ftrace_enable_fops);
diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c
index abe805d471eb..8650562bdaa9 100644
--- a/kernel/trace/trace_events_inject.c
+++ b/kernel/trace/trace_events_inject.c
@@ -328,7 +328,8 @@ event_inject_read(struct file *file, char __user *buf, size_t size,
}
const struct file_operations event_inject_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_inject_read,
.write = event_inject_write,
+ .release = tracing_release_file_tr,
};
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 9897d0bfcab7..14cb275a0bab 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -337,7 +337,7 @@ static void print_synth_event_num_val(struct trace_seq *s,
break;
default:
- trace_seq_printf(s, print_fmt, name, val, space);
+ trace_seq_printf(s, print_fmt, name, val->as_u64, space);
break;
}
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 319cfbeb0738..fa307f93fa2e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2237,6 +2237,17 @@ config TEST_DIV64
If unsure, say N.
+config TEST_IOV_ITER
+ tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this to turn on testing of the operation of the I/O iterator
+ (iov_iter). This test is executed only once during system boot (so
+ affects only boot time), or at module load time.
+
+ If unsure, say N.
+
config KPROBES_SANITY_TEST
tristate "Kprobes sanity tests" if !KUNIT_ALL_TESTS
depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 2e08397f6210..740109b6e2c8 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
CFLAGS_test_bitops.o += -Werror
obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
+obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
diff --git a/lib/idr.c b/lib/idr.c
index 7ecdfdb5309e..13f2758c2377 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
* @end: The maximum ID (exclusive).
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @nextid and @end. If
+ * Allocates an unused ID in the range specified by @start and @end. If
* @end is <= 0, it is treated as one larger than %INT_MAX. This allows
* callers to use @start + N as @end as long as N is within integer range.
* The search for an unused ID will start at the last ID allocated and will
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index b31597b0ca20..27234a820eeb 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1654,14 +1654,14 @@ static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
size_t *offset0)
{
struct page **p, *page;
- size_t skip = i->iov_offset, offset;
+ size_t skip = i->iov_offset, offset, size;
int k;
for (;;) {
if (i->nr_segs == 0)
return 0;
- maxsize = min(maxsize, i->bvec->bv_len - skip);
- if (maxsize)
+ size = min(maxsize, i->bvec->bv_len - skip);
+ if (size)
break;
i->iov_offset = 0;
i->nr_segs--;
@@ -1674,16 +1674,16 @@ static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
offset = skip % PAGE_SIZE;
*offset0 = offset;
- maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+ maxpages = want_pages_array(pages, size, offset, maxpages);
if (!maxpages)
return -ENOMEM;
p = *pages;
for (k = 0; k < maxpages; k++)
p[k] = page + k;
- maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
- iov_iter_advance(i, maxsize);
- return maxsize;
+ size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
+ iov_iter_advance(i, size);
+ return size;
}
/*
@@ -1698,14 +1698,14 @@ static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
{
struct page **p, *page;
const void *kaddr;
- size_t skip = i->iov_offset, offset, len;
+ size_t skip = i->iov_offset, offset, len, size;
int k;
for (;;) {
if (i->nr_segs == 0)
return 0;
- maxsize = min(maxsize, i->kvec->iov_len - skip);
- if (maxsize)
+ size = min(maxsize, i->kvec->iov_len - skip);
+ if (size)
break;
i->iov_offset = 0;
i->nr_segs--;
@@ -1717,13 +1717,13 @@ static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
offset = (unsigned long)kaddr & ~PAGE_MASK;
*offset0 = offset;
- maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+ maxpages = want_pages_array(pages, size, offset, maxpages);
if (!maxpages)
return -ENOMEM;
p = *pages;
kaddr -= offset;
- len = offset + maxsize;
+ len = offset + size;
for (k = 0; k < maxpages; k++) {
size_t seg = min_t(size_t, len, PAGE_SIZE);
@@ -1737,9 +1737,9 @@ static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
kaddr += PAGE_SIZE;
}
- maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
- iov_iter_advance(i, maxsize);
- return maxsize;
+ size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
+ iov_iter_advance(i, size);
+ return size;
}
/*
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 5181aa2e760b..a6348489d45f 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -65,7 +65,7 @@ struct kunit_glob_filter {
};
/* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */
-static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
+static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
const char *filter_glob)
{
const int len = strlen(filter_glob);
@@ -73,16 +73,28 @@ static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
if (!period) {
parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
+ if (!parsed->suite_glob)
+ return -ENOMEM;
+
parsed->test_glob = NULL;
strcpy(parsed->suite_glob, filter_glob);
- return;
+ return 0;
}
parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
+ if (!parsed->suite_glob)
+ return -ENOMEM;
+
parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
+ if (!parsed->test_glob) {
+ kfree(parsed->suite_glob);
+ return -ENOMEM;
+ }
strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
+
+ return 0;
}
/* Create a copy of suite with only tests that match test_glob. */
@@ -152,21 +164,24 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
}
copy_start = copy;
- if (filter_glob)
- kunit_parse_glob_filter(&parsed_glob, filter_glob);
+ if (filter_glob) {
+ *err = kunit_parse_glob_filter(&parsed_glob, filter_glob);
+ if (*err)
+ goto free_copy;
+ }
/* Parse attribute filters */
if (filters) {
filter_count = kunit_get_filter_count(filters);
parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
if (!parsed_filters) {
- kfree(copy);
- return filtered;
+ *err = -ENOMEM;
+ goto free_parsed_glob;
}
for (j = 0; j < filter_count; j++)
parsed_filters[j] = kunit_next_attr_filter(&filters, err);
if (*err)
- goto err;
+ goto free_parsed_filters;
}
for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
@@ -178,7 +193,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
parsed_glob.test_glob);
if (IS_ERR(filtered_suite)) {
*err = PTR_ERR(filtered_suite);
- goto err;
+ goto free_parsed_filters;
}
}
if (filter_count > 0 && parsed_filters != NULL) {
@@ -195,10 +210,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
filtered_suite = new_filtered_suite;
if (*err)
- goto err;
+ goto free_parsed_filters;
+
if (IS_ERR(filtered_suite)) {
*err = PTR_ERR(filtered_suite);
- goto err;
+ goto free_parsed_filters;
}
if (!filtered_suite)
break;
@@ -213,17 +229,19 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
filtered.start = copy_start;
filtered.end = copy;
-err:
- if (*err)
- kfree(copy);
+free_parsed_filters:
+ if (filter_count)
+ kfree(parsed_filters);
+free_parsed_glob:
if (filter_glob) {
kfree(parsed_glob.suite_glob);
kfree(parsed_glob.test_glob);
}
- if (filter_count)
- kfree(parsed_filters);
+free_copy:
+ if (*err)
+ kfree(copy);
return filtered;
}
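The reworked kunit_filter_suites() error handling above follows the ordered-labels unwind idiom, releasing resources in reverse order of acquisition. A minimal, self-contained sketch of the same idiom (the function and allocations are made up for illustration, not taken from this patch):

#include <linux/slab.h>

/*
 * Hedged sketch: the unwind labels are ordered so each error path frees
 * exactly what was already allocated, in reverse order of acquisition.
 */
static int setup_three_buffers(void **pa, void **pb, void **pc)
{
	void *a, *b, *c;
	int err = -ENOMEM;

	a = kzalloc(16, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(16, GFP_KERNEL);
	if (!b)
		goto free_a;

	c = kzalloc(16, GFP_KERNEL);
	if (!c)
		goto free_b;

	*pa = a;
	*pb = b;
	*pc = c;
	return 0;

free_b:
	kfree(b);
free_a:
	kfree(a);
	return err;
}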
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index 4084071d0eb5..b4f6f96b2844 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -119,7 +119,7 @@ static void parse_filter_attr_test(struct kunit *test)
{
int j, filter_count;
struct kunit_attr_filter *parsed_filters;
- char *filters = "speed>slow, module!=example";
+ char filters[] = "speed>slow, module!=example", *filter = filters;
int err = 0;
filter_count = kunit_get_filter_count(filters);
@@ -128,7 +128,7 @@ static void parse_filter_attr_test(struct kunit *test)
parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters),
GFP_KERNEL);
for (j = 0; j < filter_count; j++) {
- parsed_filters[j] = kunit_next_attr_filter(&filters, &err);
+ parsed_filters[j] = kunit_next_attr_filter(&filter, &err);
KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]);
}
@@ -154,6 +154,7 @@ static void filter_attr_test(struct kunit *test)
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
+ char filter[] = "speed>slow";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases);
@@ -168,7 +169,7 @@ static void filter_attr_test(struct kunit *test)
* attribute is unset and thus, the filtering is based on the parent attribute
* of slow.
*/
- got = kunit_filter_suites(&suite_set, NULL, "speed>slow", NULL, &err);
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
@@ -191,12 +192,13 @@ static void filter_attr_empty_test(struct kunit *test)
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
+ char filter[] = "module!=dummy";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases);
- got = kunit_filter_suites(&suite_set, NULL, "module!=dummy", NULL, &err);
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start); /* just in case */
@@ -211,12 +213,13 @@ static void filter_attr_skip_test(struct kunit *test)
.start = subsuite, .end = &subsuite[1],
};
struct kunit_suite_set got;
+ char filter[] = "speed>slow";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases);
/* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */
- got = kunit_filter_suites(&suite_set, NULL, "speed>slow", "skip", &err);
+ got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 49698a168437..421f13981412 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -784,12 +784,13 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
switch (val) {
case MODULE_STATE_LIVE:
- kunit_module_init(mod);
break;
case MODULE_STATE_GOING:
kunit_module_exit(mod);
break;
case MODULE_STATE_COMING:
+ kunit_module_init(mod);
+ break;
case MODULE_STATE_UNFORMED:
break;
}
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
new file mode 100644
index 000000000000..859b67c4d697
--- /dev/null
+++ b/lib/kunit_iov_iter.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* I/O iterator tests. This can only test kernel-backed iterator types.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/uio.h>
+#include <linux/bvec.h>
+#include <kunit/test.h>
+
+MODULE_DESCRIPTION("iov_iter testing");
+MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
+MODULE_LICENSE("GPL");
+
+struct kvec_test_range {
+ int from, to;
+};
+
+static const struct kvec_test_range kvec_test_ranges[] = {
+ { 0x00002, 0x00002 },
+ { 0x00027, 0x03000 },
+ { 0x05193, 0x18794 },
+ { 0x20000, 0x20000 },
+ { 0x20000, 0x24000 },
+ { 0x24000, 0x27001 },
+ { 0x29000, 0xffffb },
+ { 0xffffd, 0xffffe },
+ { -1 }
+};
+
+static inline u8 pattern(unsigned long x)
+{
+ return x & 0xff;
+}
+
+static void iov_kunit_unmap(void *data)
+{
+ vunmap(data);
+}
+
+static void *__init iov_kunit_create_buffer(struct kunit *test,
+ struct page ***ppages,
+ size_t npages)
+{
+ struct page **pages;
+ unsigned long got;
+ void *buffer;
+
+ pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
+ *ppages = pages;
+
+ got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
+ if (got != npages) {
+ release_pages(pages, got);
+ KUNIT_ASSERT_EQ(test, got, npages);
+ }
+
+ buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
+
+ kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
+ return buffer;
+}
+
+static void __init iov_kunit_load_kvec(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct kvec *kvec, unsigned int kvmax,
+ void *buffer, size_t bufsize,
+ const struct kvec_test_range *pr)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < kvmax; i++, pr++) {
+ if (pr->from < 0)
+ break;
+ KUNIT_ASSERT_GE(test, pr->to, pr->from);
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+ kvec[i].iov_base = buffer + pr->from;
+ kvec[i].iov_len = pr->to - pr->from;
+ size += pr->to - pr->from;
+ }
+ KUNIT_ASSERT_LE(test, size, bufsize);
+
+ iov_iter_kvec(iter, dir, kvec, i, size);
+}
+
+/*
+ * Test copying to an ITER_KVEC-type iterator.
+ */
+static void __init iov_kunit_copy_to_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **spages, **bpages;
+ struct kvec kvec[8];
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_to_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the scratch buffer. */
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+ for (i = pr->from; i < pr->to; i++)
+ scratch[i] = pattern(patt++);
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_KVEC-type iterator.
+ */
+static void __init iov_kunit_copy_from_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **spages, **bpages;
+ struct kvec kvec[8];
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = min(iter.count, bufsize);
+
+ copied = copy_from_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+struct bvec_test_range {
+ int page, from, to;
+};
+
+static const struct bvec_test_range bvec_test_ranges[] = {
+ { 0, 0x0002, 0x0002 },
+ { 1, 0x0027, 0x0893 },
+ { 2, 0x0193, 0x0794 },
+ { 3, 0x0000, 0x1000 },
+ { 4, 0x0000, 0x1000 },
+ { 5, 0x0000, 0x1000 },
+ { 6, 0x0000, 0x0ffb },
+ { 6, 0x0ffd, 0x0ffe },
+ { -1, -1, -1 }
+};
+
+static void __init iov_kunit_load_bvec(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct bio_vec *bvec, unsigned int bvmax,
+ struct page **pages, size_t npages,
+ size_t bufsize,
+ const struct bvec_test_range *pr)
+{
+ struct page *can_merge = NULL, *page;
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < bvmax; i++, pr++) {
+ if (pr->from < 0)
+ break;
+ KUNIT_ASSERT_LT(test, pr->page, npages);
+ KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
+ KUNIT_ASSERT_GE(test, pr->from, 0);
+ KUNIT_ASSERT_GE(test, pr->to, pr->from);
+ KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);
+
+ page = pages[pr->page];
+ if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
+ i--;
+ bvec[i].bv_len += pr->to;
+ } else {
+ bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
+ }
+
+ size += pr->to - pr->from;
+ if ((pr->to & ~PAGE_MASK) == 0)
+ can_merge = page + pr->to / PAGE_SIZE;
+ else
+ can_merge = NULL;
+ }
+
+ iov_iter_bvec(iter, dir, bvec, i, size);
+}
+
+/*
+ * Test copying to an ITER_BVEC-type iterator.
+ */
+static void __init iov_kunit_copy_to_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct bio_vec bvec[8];
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, b, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_to_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the scratch buffer. */
+ b = 0;
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
+ u8 *p = scratch + pr->page * PAGE_SIZE;
+
+ for (i = pr->from; i < pr->to; i++)
+ p[i] = pattern(patt++);
+ }
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_BVEC-type iterator.
+ */
+static void __init iov_kunit_copy_from_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct bio_vec bvec[8];
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_from_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
+ size_t patt = pr->page * PAGE_SIZE;
+
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(patt + j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+static void iov_kunit_destroy_xarray(void *data)
+{
+ struct xarray *xarray = data;
+
+ xa_destroy(xarray);
+ kfree(xarray);
+}
+
+static void __init iov_kunit_load_xarray(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct xarray *xarray,
+ struct page **pages, size_t npages)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < npages; i++) {
+ void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);
+
+ KUNIT_ASSERT_FALSE(test, xa_is_err(x));
+ size += PAGE_SIZE;
+ }
+ iov_iter_xarray(iter, dir, xarray, 0, size);
+}
+
+static struct xarray *iov_kunit_create_xarray(struct kunit *test)
+{
+ struct xarray *xarray;
+
+ xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
+ xa_init(xarray);
+ kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
+ return xarray;
+}
+
+/*
+ * Test copying to an ITER_XARRAY-type iterator.
+ */
+static void __init iov_kunit_copy_to_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, READ, xarray, pr->from, size);
+ copied = copy_to_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+ i += size;
+ }
+
+ /* Build the expected image in the scratch buffer. */
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+ for (i = pr->from; i < pr->to; i++)
+ scratch[i] = pattern(patt++);
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_XARRAY-type iterator.
+ */
+static void __init iov_kunit_copy_from_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
+ copied = copy_from_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+ i += size;
+ }
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test the extraction of ITER_KVEC-type iterators.
+ */
+static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ struct kvec kvec[8];
+ u8 *buffer;
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+
+ iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = iter.count;
+
+ pr = kvec_test_ranges;
+ from = pr->from;
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ size -= len;
+
+ if (len == 0)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ while (from == pr->to) {
+ pr++;
+ from = pr->from;
+ if (from < 0)
+ goto stop;
+ }
+ ix = from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ break;
+ } while (iov_iter_count(&iter) > 0);
+
+stop:
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test the extraction of ITER_BVEC-type iterators.
+ */
+static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ struct bio_vec bvec[8];
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ iov_kunit_create_buffer(test, &bpages, npages);
+ iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ pr = bvec_test_ranges;
+ from = pr->from;
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ size -= len;
+
+ if (len == 0)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ while (from == pr->to) {
+ pr++;
+ from = pr->from;
+ if (from < 0)
+ goto stop;
+ }
+ ix = pr->page + from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ break;
+ } while (iov_iter_count(&iter) > 0);
+
+stop:
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test the extraction of ITER_XARRAY-type iterators.
+ */
+static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ iov_kunit_create_buffer(test, &bpages, npages);
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ from = pr->from;
+ size = pr->to - from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, WRITE, xarray, from, size);
+
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ if (len == 0)
+ break;
+ size -= len;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ ix = from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ goto stop;
+ } while (iov_iter_count(&iter) > 0);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
+ }
+
+stop:
+ KUNIT_SUCCEED();
+}
+
+static struct kunit_case __refdata iov_kunit_cases[] = {
+ KUNIT_CASE(iov_kunit_copy_to_kvec),
+ KUNIT_CASE(iov_kunit_copy_from_kvec),
+ KUNIT_CASE(iov_kunit_copy_to_bvec),
+ KUNIT_CASE(iov_kunit_copy_from_bvec),
+ KUNIT_CASE(iov_kunit_copy_to_xarray),
+ KUNIT_CASE(iov_kunit_copy_from_xarray),
+ KUNIT_CASE(iov_kunit_extract_pages_kvec),
+ KUNIT_CASE(iov_kunit_extract_pages_bvec),
+ KUNIT_CASE(iov_kunit_extract_pages_xarray),
+ {}
+};
+
+static struct kunit_suite iov_kunit_suite = {
+ .name = "iov_iter",
+ .test_cases = iov_kunit_cases,
+};
+
+kunit_test_suites(&iov_kunit_suite);
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 45e17619422b..035b0a4db476 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -9,6 +9,7 @@ raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
+raid6_pq-$(CONFIG_LOONGARCH) += loongarch_simd.o recov_loongarch_simd.o
hostprogs += mktables
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index a22a05c9af8a..0ec534faf019 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -73,6 +73,14 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_neonx2,
&raid6_neonx1,
#endif
+#ifdef CONFIG_LOONGARCH
+#ifdef CONFIG_CPU_HAS_LASX
+ &raid6_lasx,
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ &raid6_lsx,
+#endif
+#endif
#if defined(__ia64__)
&raid6_intx32,
&raid6_intx16,
@@ -104,6 +112,14 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = {
#if defined(CONFIG_KERNEL_MODE_NEON)
&raid6_recov_neon,
#endif
+#ifdef CONFIG_LOONGARCH
+#ifdef CONFIG_CPU_HAS_LASX
+ &raid6_recov_lasx,
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ &raid6_recov_lsx,
+#endif
+#endif
&raid6_recov_intx1,
NULL
};
diff --git a/lib/raid6/loongarch.h b/lib/raid6/loongarch.h
new file mode 100644
index 000000000000..acfc33ce7056
--- /dev/null
+++ b/lib/raid6/loongarch.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * raid6/loongarch.h
+ *
+ * Definitions common to LoongArch RAID-6 code only
+ */
+
+#ifndef _LIB_RAID6_LOONGARCH_H
+#define _LIB_RAID6_LOONGARCH_H
+
+#ifdef __KERNEL__
+
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+
+#else /* for user-space testing */
+
+#include <sys/auxv.h>
+
+/* have to supply these defines for glibc 2.37 and earlier, and for musl */
+#ifndef HWCAP_LOONGARCH_LSX
+#define HWCAP_LOONGARCH_LSX (1 << 4)
+#endif
+#ifndef HWCAP_LOONGARCH_LASX
+#define HWCAP_LOONGARCH_LASX (1 << 5)
+#endif
+
+#define kernel_fpu_begin()
+#define kernel_fpu_end()
+
+#define cpu_has_lsx (getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LSX)
+#define cpu_has_lasx (getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LASX)
+
+#endif /* __KERNEL__ */
+
+#endif /* _LIB_RAID6_LOONGARCH_H */
diff --git a/lib/raid6/loongarch_simd.c b/lib/raid6/loongarch_simd.c
new file mode 100644
index 000000000000..aa5d9f924ca3
--- /dev/null
+++ b/lib/raid6/loongarch_simd.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RAID6 syndrome calculations in LoongArch SIMD (LSX & LASX)
+ *
+ * Copyright 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * Based on the generic RAID-6 code (int.uc):
+ *
+ * Copyright 2002-2004 H. Peter Anvin
+ */
+
+#include <linux/raid/pq.h>
+#include "loongarch.h"
+
+/*
+ * The vector algorithms are currently priority 0, which means the generic
+ * scalar algorithms are not disabled when vector support is present.
+ * This mirrors the LoongArch RAID5 XOR code, and the main reason is
+ * repeated here: it cannot be ruled out at this point that some future
+ * (possibly cut-down) models could run the vector algorithms slower than
+ * the scalar ones, whether for errata or micro-op reasons. It may be
+ * appropriate to revisit this after one or two more uarch generations.
+ */
+
+#ifdef CONFIG_CPU_HAS_LSX
+#define NSIZE 16
+
+static int raid6_has_lsx(void)
+{
+ return cpu_has_lsx;
+}
+
+static void raid6_lsx_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $vr0, $vr1, $vr2, $vr3: wp
+ * $vr4, $vr5, $vr6, $vr7: wq
+ * $vr8, $vr9, $vr10, $vr11: wd
+ * $vr12, $vr13, $vr14, $vr15: w2
+ * $vr16, $vr17, $vr18, $vr19: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*4) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
+ asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
+ asm volatile("vori.b $vr4, $vr0, 0");
+ asm volatile("vori.b $vr5, $vr1, 0");
+ asm volatile("vori.b $vr6, $vr2, 0");
+ asm volatile("vori.b $vr7, $vr3, 0");
+ for (z = z0-1; z >= 0; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
+ asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("vxor.v $vr16, $vr16, $vr12");
+ asm volatile("vxor.v $vr17, $vr17, $vr13");
+ asm volatile("vxor.v $vr18, $vr18, $vr14");
+ asm volatile("vxor.v $vr19, $vr19, $vr15");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr8");
+ asm volatile("vxor.v $vr5, $vr17, $vr9");
+ asm volatile("vxor.v $vr6, $vr18, $vr10");
+ asm volatile("vxor.v $vr7, $vr19, $vr11");
+ }
+ /* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
+ asm volatile("vst $vr0, %0" : "=m"(p[d+NSIZE*0]));
+ asm volatile("vst $vr1, %0" : "=m"(p[d+NSIZE*1]));
+ asm volatile("vst $vr2, %0" : "=m"(p[d+NSIZE*2]));
+ asm volatile("vst $vr3, %0" : "=m"(p[d+NSIZE*3]));
+ /* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
+ asm volatile("vst $vr4, %0" : "=m"(q[d+NSIZE*0]));
+ asm volatile("vst $vr5, %0" : "=m"(q[d+NSIZE*1]));
+ asm volatile("vst $vr6, %0" : "=m"(q[d+NSIZE*2]));
+ asm volatile("vst $vr7, %0" : "=m"(q[d+NSIZE*3]));
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $vr0, $vr1, $vr2, $vr3: wp
+ * $vr4, $vr5, $vr6, $vr7: wq
+ * $vr8, $vr9, $vr10, $vr11: wd
+ * $vr12, $vr13, $vr14, $vr15: w2
+ * $vr16, $vr17, $vr18, $vr19: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*4) {
+ /* P/Q data pages */
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
+ asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
+ asm volatile("vori.b $vr4, $vr0, 0");
+ asm volatile("vori.b $vr5, $vr1, 0");
+ asm volatile("vori.b $vr6, $vr2, 0");
+ asm volatile("vori.b $vr7, $vr3, 0");
+ for (z = z0-1; z >= start; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
+ asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("vxor.v $vr16, $vr16, $vr12");
+ asm volatile("vxor.v $vr17, $vr17, $vr13");
+ asm volatile("vxor.v $vr18, $vr18, $vr14");
+ asm volatile("vxor.v $vr19, $vr19, $vr15");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr8");
+ asm volatile("vxor.v $vr5, $vr17, $vr9");
+ asm volatile("vxor.v $vr6, $vr18, $vr10");
+ asm volatile("vxor.v $vr7, $vr19, $vr11");
+ }
+
+ /* P/Q left side optimization */
+ for (z = start-1; z >= 0; z--) {
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* wq$$ = w1$$ ^ w2$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr12");
+ asm volatile("vxor.v $vr5, $vr17, $vr13");
+ asm volatile("vxor.v $vr6, $vr18, $vr14");
+ asm volatile("vxor.v $vr7, $vr19, $vr15");
+ }
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ */
+ asm volatile(
+ "vld $vr20, %0\n\t"
+ "vld $vr21, %1\n\t"
+ "vld $vr22, %2\n\t"
+ "vld $vr23, %3\n\t"
+ "vld $vr24, %4\n\t"
+ "vld $vr25, %5\n\t"
+ "vld $vr26, %6\n\t"
+ "vld $vr27, %7\n\t"
+ "vxor.v $vr20, $vr20, $vr0\n\t"
+ "vxor.v $vr21, $vr21, $vr1\n\t"
+ "vxor.v $vr22, $vr22, $vr2\n\t"
+ "vxor.v $vr23, $vr23, $vr3\n\t"
+ "vxor.v $vr24, $vr24, $vr4\n\t"
+ "vxor.v $vr25, $vr25, $vr5\n\t"
+ "vxor.v $vr26, $vr26, $vr6\n\t"
+ "vxor.v $vr27, $vr27, $vr7\n\t"
+ "vst $vr20, %0\n\t"
+ "vst $vr21, %1\n\t"
+ "vst $vr22, %2\n\t"
+ "vst $vr23, %3\n\t"
+ "vst $vr24, %4\n\t"
+ "vst $vr25, %5\n\t"
+ "vst $vr26, %6\n\t"
+ "vst $vr27, %7\n\t"
+ : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
+ "+m"(p[d+NSIZE*2]), "+m"(p[d+NSIZE*3]),
+ "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1]),
+ "+m"(q[d+NSIZE*2]), "+m"(q[d+NSIZE*3])
+ );
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_lsx = {
+ raid6_lsx_gen_syndrome,
+ raid6_lsx_xor_syndrome,
+ raid6_has_lsx,
+ "lsx",
+ .priority = 0 /* see the comment near the top of the file for the reason */
+};
+
+#undef NSIZE
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+#define NSIZE 32
+
+static int raid6_has_lasx(void)
+{
+ return cpu_has_lasx;
+}
+
+static void raid6_lasx_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $xr0, $xr1: wp
+ * $xr2, $xr3: wq
+ * $xr4, $xr5: wd
+ * $xr6, $xr7: w2
+ * $xr8, $xr9: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*2) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("xvori.b $xr2, $xr0, 0");
+ asm volatile("xvori.b $xr3, $xr1, 0");
+ for (z = z0-1; z >= 0; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("xvxor.v $xr8, $xr8, $xr6");
+ asm volatile("xvxor.v $xr9, $xr9, $xr7");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr4");
+ asm volatile("xvxor.v $xr3, $xr9, $xr5");
+ }
+ /* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
+ asm volatile("xvst $xr0, %0" : "=m"(p[d+NSIZE*0]));
+ asm volatile("xvst $xr1, %0" : "=m"(p[d+NSIZE*1]));
+ /* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
+ asm volatile("xvst $xr2, %0" : "=m"(q[d+NSIZE*0]));
+ asm volatile("xvst $xr3, %0" : "=m"(q[d+NSIZE*1]));
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $xr0, $xr1: wp
+ * $xr2, $xr3: wq
+ * $xr4, $xr5: wd
+ * $xr6, $xr7: w2
+ * $xr8, $xr9: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*2) {
+ /* P/Q data pages */
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("xvori.b $xr2, $xr0, 0");
+ asm volatile("xvori.b $xr3, $xr1, 0");
+ for (z = z0-1; z >= start; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("xvxor.v $xr8, $xr8, $xr6");
+ asm volatile("xvxor.v $xr9, $xr9, $xr7");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr4");
+ asm volatile("xvxor.v $xr3, $xr9, $xr5");
+ }
+
+ /* P/Q left side optimization */
+ for (z = start-1; z >= 0; z--) {
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* wq$$ = w1$$ ^ w2$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr6");
+ asm volatile("xvxor.v $xr3, $xr9, $xr7");
+ }
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ */
+ asm volatile(
+ "xvld $xr10, %0\n\t"
+ "xvld $xr11, %1\n\t"
+ "xvld $xr12, %2\n\t"
+ "xvld $xr13, %3\n\t"
+ "xvxor.v $xr10, $xr10, $xr0\n\t"
+ "xvxor.v $xr11, $xr11, $xr1\n\t"
+ "xvxor.v $xr12, $xr12, $xr2\n\t"
+ "xvxor.v $xr13, $xr13, $xr3\n\t"
+ "xvst $xr10, %0\n\t"
+ "xvst $xr11, %1\n\t"
+ "xvst $xr12, %2\n\t"
+ "xvst $xr13, %3\n\t"
+ : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
+ "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1])
+ );
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_lasx = {
+ raid6_lasx_gen_syndrome,
+ raid6_lasx_xor_syndrome,
+ raid6_has_lasx,
+ "lasx",
+ .priority = 0 /* see the comment near the top of the file for the reason */
+};
+#undef NSIZE
+#endif /* CONFIG_CPU_HAS_LASX */
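Each MASK/SHLBYTE/NBYTES(0x1d) group of vector instructions above applies, to every byte lane at once, the RAID-6 multiply-by-2 in GF(2^8) from the generic int.uc code. A scalar sketch of the per-byte operation (the helper name is illustrative only):

/*
 * Scalar equivalent of one byte lane of the vslti.b/vslli.b/vandi.b/vxor.v
 * sequence: multiply by x in GF(2^8) with the RAID-6 polynomial 0x11d,
 * i.e. shift left by one and reduce with 0x1d when the top bit was set.
 */
static inline u8 gf256_mul2(u8 v)
{
	u8 mask = (v & 0x80) ? 0xff : 0x00;	/* MASK(): vslti.b against 0 */
	u8 shifted = (u8)(v << 1);		/* SHLBYTE(): vslli.b by 1 */

	return shifted ^ (mask & 0x1d);		/* vandi.b 0x1d + vxor.v */
}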
diff --git a/lib/raid6/recov_loongarch_simd.c b/lib/raid6/recov_loongarch_simd.c
new file mode 100644
index 000000000000..94aeac85e6f7
--- /dev/null
+++ b/lib/raid6/recov_loongarch_simd.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RAID6 recovery algorithms in LoongArch SIMD (LSX & LASX)
+ *
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * Originally based on recov_avx2.c and recov_ssse3.c:
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
+ */
+
+#include <linux/raid/pq.h>
+#include "loongarch.h"
+
+/*
+ * Unlike the syndrome calculation algorithms, the recovery algorithms are
+ * not selected by boot-time benchmarking, so we have to specify the
+ * priorities and hope that future cores will all have decent vector
+ * support (i.e. LASX is never slower than LSX, let alone the scalar code).
+ */
+
+#ifdef CONFIG_CPU_HAS_LSX
+static int raid6_has_lsx(void)
+{
+ return cpu_has_lsx;
+}
+
+static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /*
+ * vr20, vr21: qmul
+ * vr22, vr23: pbmul
+ */
+ asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
+ asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
+
+ while (bytes) {
+ /* vr4 - vr7: Q */
+ asm volatile("vld $vr4, %0" : : "m" (q[0]));
+ asm volatile("vld $vr5, %0" : : "m" (q[16]));
+ asm volatile("vld $vr6, %0" : : "m" (q[32]));
+ asm volatile("vld $vr7, %0" : : "m" (q[48]));
+ /* vr4 - vr7: Q + Qxy */
+ asm volatile("vld $vr8, %0" : : "m" (dq[0]));
+ asm volatile("vld $vr9, %0" : : "m" (dq[16]));
+ asm volatile("vld $vr10, %0" : : "m" (dq[32]));
+ asm volatile("vld $vr11, %0" : : "m" (dq[48]));
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+ /* vr0 - vr3: P */
+ asm volatile("vld $vr0, %0" : : "m" (p[0]));
+ asm volatile("vld $vr1, %0" : : "m" (p[16]));
+ asm volatile("vld $vr2, %0" : : "m" (p[32]));
+ asm volatile("vld $vr3, %0" : : "m" (p[48]));
+ /* vr0 - vr3: P + Pxy */
+ asm volatile("vld $vr8, %0" : : "m" (dp[0]));
+ asm volatile("vld $vr9, %0" : : "m" (dp[16]));
+ asm volatile("vld $vr10, %0" : : "m" (dp[32]));
+ asm volatile("vld $vr11, %0" : : "m" (dp[48]));
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+
+ /* vr8 - vr11: higher 4 bits of each byte of (Q + Qxy) */
+ asm volatile("vsrli.b $vr8, $vr4, 4");
+ asm volatile("vsrli.b $vr9, $vr5, 4");
+ asm volatile("vsrli.b $vr10, $vr6, 4");
+ asm volatile("vsrli.b $vr11, $vr7, 4");
+ /* vr4 - vr7: lower 4 bits of each byte of (Q + Qxy) */
+ asm volatile("vandi.b $vr4, $vr4, 0x0f");
+ asm volatile("vandi.b $vr5, $vr5, 0x0f");
+ asm volatile("vandi.b $vr6, $vr6, 0x0f");
+ asm volatile("vandi.b $vr7, $vr7, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("vshuf.b $vr4, $vr20, $vr20, $vr4");
+ asm volatile("vshuf.b $vr5, $vr20, $vr20, $vr5");
+ asm volatile("vshuf.b $vr6, $vr20, $vr20, $vr6");
+ asm volatile("vshuf.b $vr7, $vr20, $vr20, $vr7");
+ /* lookup from qmul[16] */
+ asm volatile("vshuf.b $vr8, $vr21, $vr21, $vr8");
+ asm volatile("vshuf.b $vr9, $vr21, $vr21, $vr9");
+ asm volatile("vshuf.b $vr10, $vr21, $vr21, $vr10");
+ asm volatile("vshuf.b $vr11, $vr21, $vr21, $vr11");
+ /* vr16 - vr19: B(Q + Qxy) */
+ asm volatile("vxor.v $vr16, $vr8, $vr4");
+ asm volatile("vxor.v $vr17, $vr9, $vr5");
+ asm volatile("vxor.v $vr18, $vr10, $vr6");
+ asm volatile("vxor.v $vr19, $vr11, $vr7");
+
+ /* vr4 - vr7: higher 4 bits of each byte of (P + Pxy) */
+ asm volatile("vsrli.b $vr4, $vr0, 4");
+ asm volatile("vsrli.b $vr5, $vr1, 4");
+ asm volatile("vsrli.b $vr6, $vr2, 4");
+ asm volatile("vsrli.b $vr7, $vr3, 4");
+ /* vr12 - vr15: lower 4 bits of each byte of (P + Pxy) */
+ asm volatile("vandi.b $vr12, $vr0, 0x0f");
+ asm volatile("vandi.b $vr13, $vr1, 0x0f");
+ asm volatile("vandi.b $vr14, $vr2, 0x0f");
+ asm volatile("vandi.b $vr15, $vr3, 0x0f");
+ /* lookup from pbmul[0] */
+ asm volatile("vshuf.b $vr12, $vr22, $vr22, $vr12");
+ asm volatile("vshuf.b $vr13, $vr22, $vr22, $vr13");
+ asm volatile("vshuf.b $vr14, $vr22, $vr22, $vr14");
+ asm volatile("vshuf.b $vr15, $vr22, $vr22, $vr15");
+ /* lookup from pbmul[16] */
+ asm volatile("vshuf.b $vr4, $vr23, $vr23, $vr4");
+ asm volatile("vshuf.b $vr5, $vr23, $vr23, $vr5");
+ asm volatile("vshuf.b $vr6, $vr23, $vr23, $vr6");
+ asm volatile("vshuf.b $vr7, $vr23, $vr23, $vr7");
+ /* vr4 - vr7: A(P + Pxy) */
+ asm volatile("vxor.v $vr4, $vr4, $vr12");
+ asm volatile("vxor.v $vr5, $vr5, $vr13");
+ asm volatile("vxor.v $vr6, $vr6, $vr14");
+ asm volatile("vxor.v $vr7, $vr7, $vr15");
+
+ /* vr4 - vr7: A(P + Pxy) + B(Q + Qxy) = Dx */
+ asm volatile("vxor.v $vr4, $vr4, $vr16");
+ asm volatile("vxor.v $vr5, $vr5, $vr17");
+ asm volatile("vxor.v $vr6, $vr6, $vr18");
+ asm volatile("vxor.v $vr7, $vr7, $vr19");
+ asm volatile("vst $vr4, %0" : "=m" (dq[0]));
+ asm volatile("vst $vr5, %0" : "=m" (dq[16]));
+ asm volatile("vst $vr6, %0" : "=m" (dq[32]));
+ asm volatile("vst $vr7, %0" : "=m" (dq[48]));
+
+ /* vr0 - vr3: P + Pxy + Dx = Dy */
+ asm volatile("vxor.v $vr0, $vr0, $vr4");
+ asm volatile("vxor.v $vr1, $vr1, $vr5");
+ asm volatile("vxor.v $vr2, $vr2, $vr6");
+ asm volatile("vxor.v $vr3, $vr3, $vr7");
+ asm volatile("vst $vr0, %0" : "=m" (dp[0]));
+ asm volatile("vst $vr1, %0" : "=m" (dp[16]));
+ asm volatile("vst $vr2, %0" : "=m" (dp[32]));
+ asm volatile("vst $vr3, %0" : "=m" (dp[48]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ /* vr22, vr23: qmul */
+ asm volatile("vld $vr22, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (qmul[16]));
+
+ while (bytes) {
+ /* vr0 - vr3: P + Dx */
+ asm volatile("vld $vr0, %0" : : "m" (p[0]));
+ asm volatile("vld $vr1, %0" : : "m" (p[16]));
+ asm volatile("vld $vr2, %0" : : "m" (p[32]));
+ asm volatile("vld $vr3, %0" : : "m" (p[48]));
+ /* vr4 - vr7: Qx */
+ asm volatile("vld $vr4, %0" : : "m" (dq[0]));
+ asm volatile("vld $vr5, %0" : : "m" (dq[16]));
+ asm volatile("vld $vr6, %0" : : "m" (dq[32]));
+ asm volatile("vld $vr7, %0" : : "m" (dq[48]));
+ /* vr4 - vr7: Q + Qx */
+ asm volatile("vld $vr8, %0" : : "m" (q[0]));
+ asm volatile("vld $vr9, %0" : : "m" (q[16]));
+ asm volatile("vld $vr10, %0" : : "m" (q[32]));
+ asm volatile("vld $vr11, %0" : : "m" (q[48]));
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+
+ /* vr8 - vr11: higher 4 bits of each byte of (Q + Qx) */
+ asm volatile("vsrli.b $vr8, $vr4, 4");
+ asm volatile("vsrli.b $vr9, $vr5, 4");
+ asm volatile("vsrli.b $vr10, $vr6, 4");
+ asm volatile("vsrli.b $vr11, $vr7, 4");
+ /* vr4 - vr7: lower 4 bits of each byte of (Q + Qx) */
+ asm volatile("vandi.b $vr4, $vr4, 0x0f");
+ asm volatile("vandi.b $vr5, $vr5, 0x0f");
+ asm volatile("vandi.b $vr6, $vr6, 0x0f");
+ asm volatile("vandi.b $vr7, $vr7, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("vshuf.b $vr4, $vr22, $vr22, $vr4");
+ asm volatile("vshuf.b $vr5, $vr22, $vr22, $vr5");
+ asm volatile("vshuf.b $vr6, $vr22, $vr22, $vr6");
+ asm volatile("vshuf.b $vr7, $vr22, $vr22, $vr7");
+ /* lookup from qmul[16] */
+ asm volatile("vshuf.b $vr8, $vr23, $vr23, $vr8");
+ asm volatile("vshuf.b $vr9, $vr23, $vr23, $vr9");
+ asm volatile("vshuf.b $vr10, $vr23, $vr23, $vr10");
+ asm volatile("vshuf.b $vr11, $vr23, $vr23, $vr11");
+ /* vr4 - vr7: qmul(Q + Qx) = Dx */
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+ asm volatile("vst $vr4, %0" : "=m" (dq[0]));
+ asm volatile("vst $vr5, %0" : "=m" (dq[16]));
+ asm volatile("vst $vr6, %0" : "=m" (dq[32]));
+ asm volatile("vst $vr7, %0" : "=m" (dq[48]));
+
+ /* vr0 - vr3: P + Dx + Dx = P */
+ asm volatile("vxor.v $vr0, $vr0, $vr4");
+ asm volatile("vxor.v $vr1, $vr1, $vr5");
+ asm volatile("vxor.v $vr2, $vr2, $vr6");
+ asm volatile("vxor.v $vr3, $vr3, $vr7");
+ asm volatile("vst $vr0, %0" : "=m" (p[0]));
+ asm volatile("vst $vr1, %0" : "=m" (p[16]));
+ asm volatile("vst $vr2, %0" : "=m" (p[32]));
+ asm volatile("vst $vr3, %0" : "=m" (p[48]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_lsx = {
+ .data2 = raid6_2data_recov_lsx,
+ .datap = raid6_datap_recov_lsx,
+ .valid = raid6_has_lsx,
+ .name = "lsx",
+ .priority = 1,
+};
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+static int raid6_has_lasx(void)
+{
+ return cpu_has_lasx;
+}
+
+static void raid6_2data_recov_lasx(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /*
+ * xr20, xr21: qmul
+ * xr22, xr23: pbmul
+ */
+ asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
+ asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
+ asm volatile("xvreplve0.q $xr20, $xr20");
+ asm volatile("xvreplve0.q $xr21, $xr21");
+ asm volatile("xvreplve0.q $xr22, $xr22");
+ asm volatile("xvreplve0.q $xr23, $xr23");
+
+ while (bytes) {
+ /* xr0, xr1: Q */
+ asm volatile("xvld $xr0, %0" : : "m" (q[0]));
+ asm volatile("xvld $xr1, %0" : : "m" (q[32]));
+ /* xr0, xr1: Q + Qxy */
+ asm volatile("xvld $xr4, %0" : : "m" (dq[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (dq[32]));
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* xr2, xr3: P */
+ asm volatile("xvld $xr2, %0" : : "m" (p[0]));
+ asm volatile("xvld $xr3, %0" : : "m" (p[32]));
+ /* xr2, xr3: P + Pxy */
+ asm volatile("xvld $xr4, %0" : : "m" (dp[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (dp[32]));
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr4, xr5: higher 4 bits of each byte of (Q + Qxy) */
+ asm volatile("xvsrli.b $xr4, $xr0, 4");
+ asm volatile("xvsrli.b $xr5, $xr1, 4");
+ /* xr0, xr1: lower 4 bits of each byte of (Q + Qxy) */
+ asm volatile("xvandi.b $xr0, $xr0, 0x0f");
+ asm volatile("xvandi.b $xr1, $xr1, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("xvshuf.b $xr0, $xr20, $xr20, $xr0");
+ asm volatile("xvshuf.b $xr1, $xr20, $xr20, $xr1");
+ /* lookup from qmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr21, $xr21, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr21, $xr21, $xr5");
+ /* xr6, xr7: B(Q + Qxy) */
+ asm volatile("xvxor.v $xr6, $xr4, $xr0");
+ asm volatile("xvxor.v $xr7, $xr5, $xr1");
+
+ /* xr4, xr5: higher 4 bits of each byte of (P + Pxy) */
+ asm volatile("xvsrli.b $xr4, $xr2, 4");
+ asm volatile("xvsrli.b $xr5, $xr3, 4");
+ /* xr0, xr1: lower 4 bits of each byte of (P + Pxy) */
+ asm volatile("xvandi.b $xr0, $xr2, 0x0f");
+ asm volatile("xvandi.b $xr1, $xr3, 0x0f");
+ /* lookup from pbmul[0] */
+ asm volatile("xvshuf.b $xr0, $xr22, $xr22, $xr0");
+ asm volatile("xvshuf.b $xr1, $xr22, $xr22, $xr1");
+ /* lookup from pbmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr23, $xr23, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr23, $xr23, $xr5");
+ /* xr0, xr1: A(P + Pxy) */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+
+ /* xr0, xr1: A(P + Pxy) + B(Q + Qxy) = Dx */
+ asm volatile("xvxor.v $xr0, $xr0, $xr6");
+ asm volatile("xvxor.v $xr1, $xr1, $xr7");
+
+ /* xr2, xr3: P + Pxy + Dx = Dy */
+ asm volatile("xvxor.v $xr2, $xr2, $xr0");
+ asm volatile("xvxor.v $xr3, $xr3, $xr1");
+
+ asm volatile("xvst $xr0, %0" : "=m" (dq[0]));
+ asm volatile("xvst $xr1, %0" : "=m" (dq[32]));
+ asm volatile("xvst $xr2, %0" : "=m" (dp[0]));
+ asm volatile("xvst $xr3, %0" : "=m" (dp[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_lasx(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ /* xr22, xr23: qmul */
+ asm volatile("vld $vr22, %0" : : "m" (qmul[0]));
+ asm volatile("xvreplve0.q $xr22, $xr22");
+ asm volatile("vld $vr23, %0" : : "m" (qmul[16]));
+ asm volatile("xvreplve0.q $xr23, $xr23");
+
+ while (bytes) {
+ /* xr0, xr1: P + Dx */
+ asm volatile("xvld $xr0, %0" : : "m" (p[0]));
+ asm volatile("xvld $xr1, %0" : : "m" (p[32]));
+ /* xr2, xr3: Qx */
+ asm volatile("xvld $xr2, %0" : : "m" (dq[0]));
+ asm volatile("xvld $xr3, %0" : : "m" (dq[32]));
+ /* xr2, xr3: Q + Qx */
+ asm volatile("xvld $xr4, %0" : : "m" (q[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (q[32]));
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr4, xr5: higher 4 bits of each byte of (Q + Qx) */
+ asm volatile("xvsrli.b $xr4, $xr2, 4");
+ asm volatile("xvsrli.b $xr5, $xr3, 4");
+ /* xr2, xr3: lower 4 bits of each byte of (Q + Qx) */
+ asm volatile("xvandi.b $xr2, $xr2, 0x0f");
+ asm volatile("xvandi.b $xr3, $xr3, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("xvshuf.b $xr2, $xr22, $xr22, $xr2");
+ asm volatile("xvshuf.b $xr3, $xr22, $xr22, $xr3");
+ /* lookup from qmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr23, $xr23, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr23, $xr23, $xr5");
+ /* xr2, xr3: qmul(Q + Qx) = Dx */
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr0, xr1: P + Dx + Dx = P */
+ asm volatile("xvxor.v $xr0, $xr0, $xr2");
+ asm volatile("xvxor.v $xr1, $xr1, $xr3");
+
+ asm volatile("xvst $xr2, %0" : "=m" (dq[0]));
+ asm volatile("xvst $xr3, %0" : "=m" (dq[32]));
+ asm volatile("xvst $xr0, %0" : "=m" (p[0]));
+ asm volatile("xvst $xr1, %0" : "=m" (p[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_lasx = {
+ .data2 = raid6_2data_recov_lasx,
+ .datap = raid6_datap_recov_lasx,
+ .valid = raid6_has_lasx,
+ .name = "lasx",
+ .priority = 2,
+};
+#endif /* CONFIG_CPU_HAS_LASX */
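Editor's note: the LSX/LASX loops above implement the standard RAID-6 two-failure recovery over GF(2^8): Dx = A*(P+Pxy) + B*(Q+Qxy) and Dy = (P+Pxy) + Dx, with each multiplication done as two 4-bit table lookups (xvshuf.b) on the high and low nibbles. For reference, here is a scalar sketch of the same per-byte math using the byte-indexed tables from include/linux/raid/pq.h; it mirrors the generic lib/raid6/recov.c path and is an illustration, not the vectorized code above.

    /*
     * Scalar sketch of the two-failure recovery, one byte at a time.
     * p/q are the syndromes computed with the failed blocks zeroed
     * (P+Pxy, Q+Qxy); dq/dp receive the reconstructed Dx/Dy.
     */
    static void recov_2data_scalar(size_t bytes, int faila, int failb,
                                   const u8 *p, const u8 *q,
                                   u8 *dq, u8 *dp)
    {
        const u8 *pbmul = raid6_gfmul[raid6_gfexi[failb - faila]];
        const u8 *qmul  = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila] ^
                                                  raid6_gfexp[failb]]];
        size_t i;

        for (i = 0; i < bytes; i++) {
            u8 dx = pbmul[p[i]] ^ qmul[q[i]]; /* A(P+Pxy) + B(Q+Qxy) */

            dq[i] = dx;                       /* reconstructed Dx */
            dp[i] = p[i] ^ dx;                /* Dy = (P+Pxy) + Dx */
        }
    }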
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 1f693ea3b980..2abe0076a636 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -41,6 +41,16 @@ ifeq ($(findstring ppc,$(ARCH)),ppc)
gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
endif
+ifeq ($(ARCH),loongarch64)
+ CFLAGS += -I../../../arch/loongarch/include -DCONFIG_LOONGARCH=1
+ CFLAGS += $(shell echo 'vld $$vr0, $$zero, 0' | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_CPU_HAS_LSX=1)
+ CFLAGS += $(shell echo 'xvld $$xr0, $$zero, 0' | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_CPU_HAS_LASX=1)
+endif
+
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
CFLAGS += -DCONFIG_X86
@@ -54,6 +64,8 @@ else ifeq ($(HAS_ALTIVEC),yes)
CFLAGS += -DCONFIG_ALTIVEC
OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
+else ifeq ($(ARCH),loongarch64)
+ OBJS += loongarch_simd.o recov_loongarch_simd.o
endif
.c.o:
diff --git a/lib/xarray.c b/lib/xarray.c
index 2071a3718f4e..39f07bfc4dcc 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -206,7 +206,7 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
void *entry = xa_entry(xas->xa, node, offset);
xas->xa_node = node;
- if (xa_is_sibling(entry)) {
+ while (xa_is_sibling(entry)) {
offset = xa_to_sibling(entry);
entry = xa_entry(xas->xa, node, offset);
if (node->shift && xa_is_node(entry))
@@ -1802,6 +1802,9 @@ EXPORT_SYMBOL(xa_get_order);
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be used on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -1850,6 +1853,9 @@ EXPORT_SYMBOL(__xa_alloc);
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be used on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
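Editor's note: the added kernel-doc spells out a precondition that is easy to miss: __xa_alloc() and its variants only work on an allocating XArray. A minimal usage sketch under that assumption follows; the caller and its object are hypothetical, not part of this patch.

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(my_ids); /* XA_FLAGS_ALLOC, as xa_init_flags() would set */

    static int track_object(void *obj)  /* hypothetical caller */
    {
        u32 id;
        int err;

        xa_lock(&my_ids);
        /* __xa_alloc() expects xa_lock held; it may drop and retake it for GFP_KERNEL */
        err = __xa_alloc(&my_ids, &id, obj, xa_limit_32b, GFP_KERNEL);
        xa_unlock(&my_ids);

        return err;
    }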
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index dcfec277e839..89895f38f722 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -139,6 +139,10 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
return 0;
}
+void __weak __meminit pmd_init(void *addr)
+{
+}
+
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
unsigned long end)
{
@@ -166,8 +170,9 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
if (!p)
return -ENOMEM;
} else {
- pud_populate(&init_mm, pud,
- early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+ pmd_init(p);
+ pud_populate(&init_mm, pud, p);
}
}
zero_pmd_populate(pud, addr, next);
@@ -176,6 +181,10 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
return 0;
}
+void __weak __meminit pud_init(void *addr)
+{
+}
+
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
unsigned long end)
{
@@ -207,8 +216,9 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
if (!p)
return -ENOMEM;
} else {
- p4d_populate(&init_mm, p4d,
- early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+ pud_init(p);
+ p4d_populate(&init_mm, p4d, p);
}
}
zero_pud_populate(p4d, addr, next);
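Editor's note: the __weak pmd_init()/pud_init() stubs give architectures a hook to post-initialize the page tables that the generic KASAN early-init code allocates with early_alloc(); most architectures leave them empty, while an architecture whose "invalid" entries are not all-zero (the LoongArch case in this series) overrides them. A rough sketch of the override pattern; the fill value is a placeholder, not the real LoongArch definition.

    /* mm/kasan/init.c keeps empty defaults that most architectures use */
    void __weak __meminit pmd_init(void *addr) { }
    void __weak __meminit pud_init(void *addr) { }

    /* arch override (sketch): point every slot at the arch's invalid lower-level table */
    void __meminit pmd_init(void *addr)
    {
        unsigned long *entry = addr;
        unsigned long *end = entry + PTRS_PER_PMD;

        for (; entry < end; entry++)
            *entry = (unsigned long)ARCH_INVALID_PTE_TABLE; /* placeholder value */
    }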
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 2e973b36fe07..f70e3d7a602e 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -291,16 +291,22 @@ struct kasan_stack_ring {
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+#ifndef __HAVE_ARCH_SHADOW_MAP
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
<< KASAN_SHADOW_SCALE_SHIFT);
}
+#endif
static __always_inline bool addr_has_metadata(const void *addr)
{
+#ifdef __HAVE_ARCH_SHADOW_MAP
+ return (kasan_mem_to_shadow((void *)addr) != NULL);
+#else
return (kasan_reset_tag(addr) >=
kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
+#endif
}
/**
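Editor's note: when an architecture defines __HAVE_ARCH_SHADOW_MAP it supplies its own kasan_mem_to_shadow(), which may return NULL for addresses that have no shadow mapping; addr_has_metadata() then becomes a simple NULL check instead of the linear-offset comparison. A hedged sketch of what such an arch hook can look like; the region names and offsets are placeholders, not the LoongArch layout.

    /* arch/<arch>/include/asm/kasan.h -- sketch, names are illustrative */
    #define __HAVE_ARCH_SHADOW_MAP

    static inline void *kasan_mem_to_shadow(const void *addr)
    {
        unsigned long a = (unsigned long)kasan_reset_tag(addr);

        if (a >= ARCH_REGION1_START && a < ARCH_REGION1_END)
            return (void *)((a >> KASAN_SHADOW_SCALE_SHIFT) + REGION1_SHADOW_OFFSET);
        if (a >= ARCH_REGION2_START && a < ARCH_REGION2_END)
            return (void *)((a >> KASAN_SHADOW_SCALE_SHIFT) + REGION2_SHADOW_OFFSET);

        return NULL; /* no shadow here: addr_has_metadata() returns false */
    }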
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 96fd0411f5c5..3872528d0963 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -574,13 +574,14 @@ static void rcu_guarded_free(struct rcu_head *h)
*/
static unsigned long kfence_init_pool(void)
{
- unsigned long addr = (unsigned long)__kfence_pool;
+ unsigned long addr;
struct page *pages;
int i;
if (!arch_kfence_init_pool())
- return addr;
+ return (unsigned long)__kfence_pool;
+ addr = (unsigned long)__kfence_pool;
pages = virt_to_page(__kfence_pool);
/*
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 7876b7d703cb..c32f5e28758b 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -815,41 +815,45 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
const struct net *net, unsigned short port,
int l3mdev, const struct sock *sk)
{
+ if (!net_eq(ib2_net(tb), net) || tb->port != port ||
+ tb->l3mdev != l3mdev)
+ return false;
+
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family != tb->family)
+ if (sk->sk_family != tb->family) {
+ if (sk->sk_family == AF_INET)
+ return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
+ tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
+
return false;
+ }
if (sk->sk_family == AF_INET6)
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev &&
- ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
- else
+ return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
#endif
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
+ return tb->rcv_saddr == sk->sk_rcv_saddr;
}
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
unsigned short port, int l3mdev, const struct sock *sk)
{
+ if (!net_eq(ib2_net(tb), net) || tb->port != port ||
+ tb->l3mdev != l3mdev)
+ return false;
+
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family != tb->family) {
if (sk->sk_family == AF_INET)
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev &&
- ipv6_addr_any(&tb->v6_rcv_saddr);
+ return ipv6_addr_any(&tb->v6_rcv_saddr) ||
+ ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr);
return false;
}
if (sk->sk_family == AF_INET6)
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev &&
- ipv6_addr_any(&tb->v6_rcv_saddr);
- else
+ return ipv6_addr_any(&tb->v6_rcv_saddr);
#endif
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
+ return tb->rcv_saddr == 0;
}
/* The socket's bhash2 hashbucket spinlock must be held when this is called */
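Editor's note: the rewrite hoists the net/port/l3mdev comparison to the top and adds the case where an AF_INET socket has to match a bucket that stored an IPv4-mapped IPv6 address (::ffff:a.b.c.d). The core of that new comparison, shown in isolation with a hypothetical helper name:

    /* Sketch: does an IPv6 bucket address match an IPv4 socket address? */
    static bool bucket_matches_v4_sock(const struct in6_addr *bucket_addr,
                                       __be32 sk_rcv_saddr)
    {
        /* ::ffff:a.b.c.d carries the IPv4 address in the last 32 bits */
        return ipv6_addr_v4mapped(bucket_addr) &&
               bucket_addr->s6_addr32[3] == sk_rcv_saddr;
    }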
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 740539a218b7..dd1d8ffd5f59 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -930,17 +930,18 @@ partial_message:
out_error:
kcm_push(kcm);
- if (copied && sock->type == SOCK_SEQPACKET) {
+ if (sock->type == SOCK_SEQPACKET) {
/* Wrote some bytes before encountering an
* error, return partial success.
*/
- goto partial_message;
- }
-
- if (head != kcm->seq_skb)
+ if (copied)
+ goto partial_message;
+ if (head != kcm->seq_skb)
+ kfree_skb(head);
+ } else {
kfree_skb(head);
- else if (copied)
- kcm_tx_msg(head)->last_skb = skb;
+ kcm->seq_skb = NULL;
+ }
err = sk_stream_error(sk, msg->msg_flags, err);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 1ed4a611631f..d1fc295b83b5 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -817,7 +817,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
psock = sk_psock_get(sk);
if (!psock || !policy) {
err = tls_push_record(sk, flags, record_type);
- if (err && sk->sk_err == EBADMSG) {
+ if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
@@ -846,7 +846,7 @@ more_data:
switch (psock->eval) {
case __SK_PASS:
err = tls_push_record(sk, flags, record_type);
- if (err && sk->sk_err == EBADMSG) {
+ if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index afdddc82f02b..56d3c338d91d 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -81,7 +81,6 @@ arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_NO
arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_SUPPORT
arch/x86/include/uapi/asm/auxvec.h:CONFIG_IA32_EMULATION
arch/x86/include/uapi/asm/auxvec.h:CONFIG_X86_64
-arch/x86/include/uapi/asm/mman.h:CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
"
for c in $configs
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 34a5386d444a..de499dce5265 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1228,6 +1228,15 @@ static void check_export_symbol(struct module *mod, struct elf_info *elf,
*/
s->is_func = (ELF_ST_TYPE(sym->st_info) == STT_FUNC);
+ /*
+ * For parisc64, symbols prefixed with $$ from the library have the symbol
+ * type STT_LOPROC. They should be handled as functions too.
+ */
+ if (elf->hdr->e_ident[EI_CLASS] == ELFCLASS64 &&
+ elf->hdr->e_machine == EM_PARISC &&
+ ELF_ST_TYPE(sym->st_info) == STT_LOPROC)
+ s->is_func = true;
+
if (match(secname, PATTERNS(INIT_SECTIONS)))
warn("%s: %s: EXPORT_SYMBOL used for init symbol. Remove __init or EXPORT_SYMBOL.\n",
mod->name, name);
diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h
index d43231b783e4..55b1df8f66a8 100644
--- a/security/landlock/ruleset.h
+++ b/security/landlock/ruleset.h
@@ -67,7 +67,7 @@ struct landlock_rule {
* @layers: Stack of layers, from the latest to the newest, implemented
* as a flexible array member (FAM).
*/
- struct landlock_layer layers[];
+ struct landlock_layer layers[] __counted_by(num_layers);
};
/**
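Editor's note: __counted_by(num_layers) tells the compiler and the runtime bounds checkers (FORTIFY_SOURCE, UBSAN_BOUNDS) that the flexible array's element count lives in the num_layers member, so overruns through layers[] can be detected. A generic sketch of the annotation on a made-up structure, allocated with the usual struct_size() idiom:

    struct item_set {                   /* hypothetical example */
        u32 nr_items;
        u64 items[] __counted_by(nr_items);
    };

    static struct item_set *item_set_alloc(u32 n)
    {
        struct item_set *set = kzalloc(struct_size(set, items, n), GFP_KERNEL);

        if (set)
            set->nr_items = n;          /* set the bound before items[] is used */
        return set;
    }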
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 4859fb1caec9..a11cd7d6295f 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1992,8 +1992,8 @@ static int default_write_copy(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
struct iov_iter *iter, unsigned long bytes)
{
- if (!copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
- bytes, iter))
+ if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
+ bytes, iter) != bytes)
return -EFAULT;
return 0;
}
@@ -2025,8 +2025,8 @@ static int default_read_copy(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
struct iov_iter *iter, unsigned long bytes)
{
- if (!copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
- bytes, iter))
+ if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
+ bytes, iter) != bytes)
return -EFAULT;
return 0;
}
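Editor's note: the fix matters because copy_from_iter()/copy_to_iter() return the number of bytes actually copied, not a success flag, so testing the result only for non-zero let short copies pass as success. The corrected pattern, sketched with a hypothetical destination buffer:

    /* Sketch: treat any short copy from an iov_iter as a fault */
    static int fill_from_iter(void *dst, size_t bytes, struct iov_iter *iter)
    {
        if (copy_from_iter(dst, bytes, iter) != bytes)
            return -EFAULT; /* partial copy: the caller must not trust dst */
        return 0;
    }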
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 174585bf59d2..b603bb93f896 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -187,8 +187,13 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
err = expand_var_event(event, 0, len, buf, in_kernel);
if (err < 0)
return err;
- if (len != newlen)
- memset(buf + len, 0, newlen - len);
+ if (len != newlen) {
+ if (in_kernel)
+ memset(buf + len, 0, newlen - len);
+ else if (clear_user((__force void __user *)buf + len,
+ newlen - len))
+ return -EFAULT;
+ }
return newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);
diff --git a/sound/isa/sb/emu8000_pcm.c b/sound/isa/sb/emu8000_pcm.c
index c05935c2edc4..9234d4fe8ada 100644
--- a/sound/isa/sb/emu8000_pcm.c
+++ b/sound/isa/sb/emu8000_pcm.c
@@ -456,7 +456,7 @@ static int emu8k_pcm_silence(struct snd_pcm_substream *subs,
/* convert to word unit */
pos = (pos << 1) + rec->loop_start[voice];
count <<= 1;
- LOOP_WRITE(rec, pos, USER_SOCKPTR(NULL), count);
+ LOOP_WRITE(rec, pos, NULL, count);
return 0;
}
diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
index 0ba1fbcbb21e..627899959ffe 100644
--- a/sound/pci/hda/patch_cs8409.c
+++ b/sound/pci/hda/patch_cs8409.c
@@ -888,7 +888,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
/* Initialize CS42L42 companion codec */
cs8409_i2c_bulk_write(cs42l42, cs42l42->init_seq, cs42l42->init_seq_num);
- usleep_range(30000, 35000);
+ msleep(CS42L42_INIT_TIMEOUT_MS);
/* Clear interrupts, by reading interrupt status registers */
cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h
index 2a8dfb4ff046..937e9387abdc 100644
--- a/sound/pci/hda/patch_cs8409.h
+++ b/sound/pci/hda/patch_cs8409.h
@@ -229,6 +229,7 @@ enum cs8409_coefficient_index_registers {
#define CS42L42_I2C_SLEEP_US (2000)
#define CS42L42_PDN_TIMEOUT_US (250000)
#define CS42L42_PDN_SLEEP_US (2000)
+#define CS42L42_INIT_TIMEOUT_MS (45)
#define CS42L42_FULL_SCALE_VOL_MASK (2)
#define CS42L42_FULL_SCALE_VOL_0DB (1)
#define CS42L42_FULL_SCALE_VOL_MINUS6DB (0)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a07df6f92960..b7e78bfcffd8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7057,6 +7057,27 @@ static void alc295_fixup_dell_inspiron_top_speakers(struct hda_codec *codec,
}
}
+/* Forcibly assign NID 0x03 to HP while NID 0x02 to SPK */
+static void alc287_fixup_bind_dacs(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */
+ static const hda_nid_t preferred_pairs[] = {
+ 0x17, 0x02, 0x21, 0x03, 0
+ };
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ spec->gen.preferred_dacs = preferred_pairs;
+ spec->gen.auto_mute_via_amp = 1;
+ snd_hda_codec_write_cache(codec, 0x14, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ 0x0); /* Make sure 0x14 is disabled */
+}
+
+
enum {
ALC269_FIXUP_GPIO2,
ALC269_FIXUP_SONY_VAIO,
@@ -7319,6 +7340,7 @@ enum {
ALC287_FIXUP_TAS2781_I2C,
ALC245_FIXUP_HP_MUTE_LED_COEFBIT,
ALC245_FIXUP_HP_X360_MUTE_LEDS,
+ ALC287_FIXUP_THINKPAD_I2S_SPK,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9413,6 +9435,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC245_FIXUP_HP_GPIO_LED
},
+ [ALC287_FIXUP_THINKPAD_I2S_SPK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_bind_dacs,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -10544,6 +10570,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x17, 0x90170111},
{0x19, 0x03a11030},
{0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC287_FIXUP_THINKPAD_I2S_SPK,
+ {0x17, 0x90170110},
+ {0x19, 0x03a11030},
+ {0x21, 0x03211020}),
SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
{0x12, 0x90a60130},
{0x17, 0x90170110},
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 37114fd61a38..fb802802939e 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -173,16 +173,6 @@ static int tasdevice_get_profile_id(struct snd_kcontrol *kcontrol,
return 0;
}
-static int tasdevice_hda_clamp(int val, int max)
-{
- if (val > max)
- val = max;
-
- if (val < 0)
- val = 0;
- return val;
-}
-
static int tasdevice_set_profile_id(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -191,7 +181,7 @@ static int tasdevice_set_profile_id(struct snd_kcontrol *kcontrol,
int max = tas_priv->rcabin.ncfgs - 1;
int val, ret = 0;
- val = tasdevice_hda_clamp(nr_profile, max);
+ val = clamp(nr_profile, 0, max);
if (tas_priv->rcabin.profile_cfg_id != val) {
tas_priv->rcabin.profile_cfg_id = val;
@@ -248,7 +238,7 @@ static int tasdevice_program_put(struct snd_kcontrol *kcontrol,
int max = tas_fw->nr_programs - 1;
int val, ret = 0;
- val = tasdevice_hda_clamp(nr_program, max);
+ val = clamp(nr_program, 0, max);
if (tas_priv->cur_prog != val) {
tas_priv->cur_prog = val;
@@ -277,7 +267,7 @@ static int tasdevice_config_put(struct snd_kcontrol *kcontrol,
int max = tas_fw->nr_configurations - 1;
int val, ret = 0;
- val = tasdevice_hda_clamp(nr_config, max);
+ val = clamp(nr_config, 0, max);
if (tas_priv->cur_conf != val) {
tas_priv->cur_conf = val;
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index b304b3562c82..3ec15b46fa35 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -217,6 +217,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82TL"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
}
},
@@ -328,6 +335,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ DMI_MATCH(DMI_BOARD_NAME, "8A3E"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MECHREVO"),
DMI_MATCH(DMI_BOARD_NAME, "MRID6"),
}
diff --git a/sound/soc/atmel/mchp-pdmc.c b/sound/soc/atmel/mchp-pdmc.c
index afe213a71212..dcc4e14b3dde 100644
--- a/sound/soc/atmel/mchp-pdmc.c
+++ b/sound/soc/atmel/mchp-pdmc.c
@@ -954,7 +954,7 @@ static int mchp_pdmc_dt_init(struct mchp_pdmc *dd)
/* used to clean the channel index found on RHR's MSB */
static int mchp_pdmc_process(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes)
+ unsigned long bytes)
{
struct snd_pcm_runtime *runtime = substream->runtime;
u8 *dma_ptr = runtime->dma_area + hwoff +
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 95b5bd883215..f1e1dbc509f6 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1968,11 +1968,15 @@ config SND_SOC_UDA1380
tristate
depends on I2C
+config SND_SOC_WCD_CLASSH
+ tristate
+
config SND_SOC_WCD9335
tristate "WCD9335 Codec"
depends on SLIMBUS
select REGMAP_SLIMBUS
select REGMAP_IRQ
+ select SND_SOC_WCD_CLASSH
help
The WCD9335 is a standalone Hi-Fi audio CODEC IC, supports
Qualcomm Technologies, Inc. (QTI) multimedia solutions,
@@ -1987,6 +1991,7 @@ config SND_SOC_WCD934X
depends on SLIMBUS
select REGMAP_IRQ
select REGMAP_SLIMBUS
+ select SND_SOC_WCD_CLASSH
select SND_SOC_WCD_MBHC
depends on MFD_WCD934X || COMPILE_TEST
help
@@ -1997,6 +2002,7 @@ config SND_SOC_WCD938X
depends on SND_SOC_WCD938X_SDW
tristate
depends on SOUNDWIRE || !SOUNDWIRE
+ select SND_SOC_WCD_CLASSH
config SND_SOC_WCD938X_SDW
tristate "WCD9380/WCD9385 Codec - SDW"
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index c8502a49b40a..a87e56938ce5 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -303,10 +303,11 @@ snd-soc-twl4030-objs := twl4030.o
snd-soc-twl6040-objs := twl6040.o
snd-soc-uda1334-objs := uda1334.o
snd-soc-uda1380-objs := uda1380.o
+snd-soc-wcd-classh-objs := wcd-clsh-v2.o
snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o
-snd-soc-wcd9335-objs := wcd-clsh-v2.o wcd9335.o
-snd-soc-wcd934x-objs := wcd-clsh-v2.o wcd934x.o
-snd-soc-wcd938x-objs := wcd938x.o wcd-clsh-v2.o
+snd-soc-wcd9335-objs := wcd9335.o
+snd-soc-wcd934x-objs := wcd934x.o
+snd-soc-wcd938x-objs := wcd938x.o
snd-soc-wcd938x-sdw-objs := wcd938x-sdw.o
snd-soc-wl1273-objs := wl1273.o
snd-soc-wm-adsp-objs := wm_adsp.o
@@ -685,6 +686,7 @@ obj-$(CONFIG_SND_SOC_TWL4030) += snd-soc-twl4030.o
obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
obj-$(CONFIG_SND_SOC_UDA1334) += snd-soc-uda1334.o
obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
+obj-$(CONFIG_SND_SOC_WCD_CLASSH) += snd-soc-wcd-classh.o
obj-$(CONFIG_SND_SOC_WCD_MBHC) += snd-soc-wcd-mbhc.o
obj-$(CONFIG_SND_SOC_WCD9335) += snd-soc-wcd9335.o
obj-$(CONFIG_SND_SOC_WCD934X) += snd-soc-wcd934x.o
diff --git a/sound/soc/codecs/cs35l45.c b/sound/soc/codecs/cs35l45.c
index d1edb9876c10..be4f4229576c 100644
--- a/sound/soc/codecs/cs35l45.c
+++ b/sound/soc/codecs/cs35l45.c
@@ -279,7 +279,7 @@ static const struct snd_kcontrol_new cs35l45_dsp_muxes[] = {
};
static const struct snd_kcontrol_new cs35l45_dac_muxes[] = {
- SOC_DAPM_ENUM("DACPCM1 Source", cs35l45_dacpcm_enums[0]),
+ SOC_DAPM_ENUM("DACPCM Source", cs35l45_dacpcm_enums[0]),
};
static const struct snd_soc_dapm_widget cs35l45_dapm_widgets[] = {
@@ -333,7 +333,7 @@ static const struct snd_soc_dapm_widget cs35l45_dapm_widgets[] = {
SND_SOC_DAPM_MUX("DSP_RX7 Source", SND_SOC_NOPM, 0, 0, &cs35l45_dsp_muxes[6]),
SND_SOC_DAPM_MUX("DSP_RX8 Source", SND_SOC_NOPM, 0, 0, &cs35l45_dsp_muxes[7]),
- SND_SOC_DAPM_MUX("DACPCM1 Source", SND_SOC_NOPM, 0, 0, &cs35l45_dac_muxes[0]),
+ SND_SOC_DAPM_MUX("DACPCM Source", SND_SOC_NOPM, 0, 0, &cs35l45_dac_muxes[0]),
SND_SOC_DAPM_OUT_DRV("AMP", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -403,7 +403,7 @@ static const struct snd_soc_dapm_route cs35l45_dapm_routes[] = {
{ "ASP_RX1", NULL, "ASP_EN" },
{ "ASP_RX2", NULL, "ASP_EN" },
- { "AMP", NULL, "DACPCM1 Source"},
+ { "AMP", NULL, "DACPCM Source"},
{ "AMP", NULL, "GLOBAL_EN"},
CS35L45_DSP_MUX_ROUTE("DSP_RX1"),
@@ -427,7 +427,7 @@ static const struct snd_soc_dapm_route cs35l45_dapm_routes[] = {
{"DSP1 Preload", NULL, "DSP1 Preloader"},
{"DSP1", NULL, "DSP1 Preloader"},
- CS35L45_DAC_MUX_ROUTE("DACPCM1"),
+ CS35L45_DAC_MUX_ROUTE("DACPCM"),
{ "SPK", NULL, "AMP"},
};
@@ -969,7 +969,7 @@ static irqreturn_t cs35l45_dsp_virt2_mbox_cb(int irq, void *data)
ret = regmap_read(cs35l45->regmap, CS35L45_DSP_VIRT2_MBOX_3, &mbox_val);
if (!ret && mbox_val)
- ret = cs35l45_dsp_virt2_mbox3_irq_handle(cs35l45, mbox_val & CS35L45_MBOX3_CMD_MASK,
+ cs35l45_dsp_virt2_mbox3_irq_handle(cs35l45, mbox_val & CS35L45_MBOX3_CMD_MASK,
(mbox_val & CS35L45_MBOX3_DATA_MASK) >> CS35L45_MBOX3_DATA_SHIFT);
/* Handle DSP trace log IRQ */
@@ -1078,6 +1078,7 @@ static int cs35l45_initialize(struct cs35l45_private *cs35l45)
switch (dev_id[0]) {
case 0x35A450:
+ case 0x35A460:
break;
default:
dev_err(cs35l45->dev, "Bad DEVID 0x%x\n", dev_id[0]);
diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
index ae373f335ea8..98b1e63360ae 100644
--- a/sound/soc/codecs/cs35l56-shared.c
+++ b/sound/soc/codecs/cs35l56-shared.c
@@ -243,26 +243,27 @@ int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base)
{
unsigned int reg;
unsigned int val;
- int ret;
+ int read_ret, poll_ret;
if (cs35l56_base->rev < CS35L56_REVID_B0)
reg = CS35L56_DSP1_HALO_STATE_A1;
else
reg = CS35L56_DSP1_HALO_STATE;
- ret = regmap_read_poll_timeout(cs35l56_base->regmap, reg,
- val,
- (val < 0xFFFF) && (val >= CS35L56_HALO_STATE_BOOT_DONE),
- CS35L56_HALO_STATE_POLL_US,
- CS35L56_HALO_STATE_TIMEOUT_US);
-
- if ((ret < 0) && (ret != -ETIMEDOUT)) {
- dev_err(cs35l56_base->dev, "Failed to read HALO_STATE: %d\n", ret);
- return ret;
- }
-
- if ((ret == -ETIMEDOUT) || (val != CS35L56_HALO_STATE_BOOT_DONE)) {
- dev_err(cs35l56_base->dev, "Firmware boot fail: HALO_STATE=%#x\n", val);
+ /*
+ * This can't be a regmap_read_poll_timeout() because cs35l56 will NAK
+ * I2C until it has booted which would terminate the poll
+ */
+ poll_ret = read_poll_timeout(regmap_read, read_ret,
+ (val < 0xFFFF) && (val >= CS35L56_HALO_STATE_BOOT_DONE),
+ CS35L56_HALO_STATE_POLL_US,
+ CS35L56_HALO_STATE_TIMEOUT_US,
+ false,
+ cs35l56_base->regmap, reg, &val);
+
+ if (poll_ret) {
+ dev_err(cs35l56_base->dev, "Firmware boot timed out(%d): HALO_STATE=%#x\n",
+ read_ret, val);
return -EIO;
}
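Editor's note: regmap_read_poll_timeout() bails out as soon as a read fails, but the CS35L56 NAKs I2C until its firmware has booted, so a failed read has to mean "keep polling" rather than "give up". read_poll_timeout() from <linux/iopoll.h> keeps the read's own return code separate from the poll result, which is what the patch relies on. A generic sketch of that shape; the register value and timeouts are placeholders.

    /* Sketch: poll a register whose reads may transiently fail */
    static int wait_for_boot(struct regmap *map, unsigned int reg)
    {
        unsigned int val = 0;
        int read_ret, poll_ret;

        poll_ret = read_poll_timeout(regmap_read, read_ret,
                                     !read_ret && val == BOOT_DONE_VAL, /* placeholder */
                                     1000, 500000, false,
                                     map, reg, &val);
        if (poll_ret)
            return -EIO;    /* timed out; read_ret/val hold the last attempt */
        return 0;
    }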
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 24e718e51174..1a95c370fc4c 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -2205,7 +2205,8 @@ static int cs42l43_codec_probe(struct platform_device *pdev)
// Don't use devm as we need to get against the MFD device
priv->mclk = clk_get_optional(cs42l43->dev, "mclk");
if (IS_ERR(priv->mclk)) {
- dev_err_probe(priv->dev, PTR_ERR(priv->mclk), "Failed to get mclk\n");
+ ret = PTR_ERR(priv->mclk);
+ dev_err_probe(priv->dev, ret, "Failed to get mclk\n");
goto err_pm;
}
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 038d93e20883..1a137ca3f496 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3269,13 +3269,17 @@ static int rt5645_component_set_jack(struct snd_soc_component *component,
{
struct snd_soc_jack *mic_jack = NULL;
struct snd_soc_jack *btn_jack = NULL;
- int *type = (int *)data;
+ int type;
- if (*type & SND_JACK_MICROPHONE)
- mic_jack = hs_jack;
- if (*type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3))
- btn_jack = hs_jack;
+ if (hs_jack) {
+ type = *(int *)data;
+
+ if (type & SND_JACK_MICROPHONE)
+ mic_jack = hs_jack;
+ if (type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3))
+ btn_jack = hs_jack;
+ }
return rt5645_set_jack_detect(component, hs_jack, mic_jack, btn_jack);
}
diff --git a/sound/soc/codecs/wcd-clsh-v2.c b/sound/soc/codecs/wcd-clsh-v2.c
index a75db27e5205..d96e23ec43d4 100644
--- a/sound/soc/codecs/wcd-clsh-v2.c
+++ b/sound/soc/codecs/wcd-clsh-v2.c
@@ -355,6 +355,7 @@ void wcd_clsh_set_hph_mode(struct wcd_clsh_ctrl *ctrl, int mode)
wcd_clsh_v2_set_hph_mode(comp, mode);
}
+EXPORT_SYMBOL_GPL(wcd_clsh_set_hph_mode);
static void wcd_clsh_set_flyback_current(struct snd_soc_component *comp,
int mode)
@@ -869,11 +870,13 @@ int wcd_clsh_ctrl_set_state(struct wcd_clsh_ctrl *ctrl,
return 0;
}
+EXPORT_SYMBOL_GPL(wcd_clsh_ctrl_set_state);
int wcd_clsh_ctrl_get_state(struct wcd_clsh_ctrl *ctrl)
{
return ctrl->state;
}
+EXPORT_SYMBOL_GPL(wcd_clsh_ctrl_get_state);
struct wcd_clsh_ctrl *wcd_clsh_ctrl_alloc(struct snd_soc_component *comp,
int version)
@@ -890,8 +893,13 @@ struct wcd_clsh_ctrl *wcd_clsh_ctrl_alloc(struct snd_soc_component *comp,
return ctrl;
}
+EXPORT_SYMBOL_GPL(wcd_clsh_ctrl_alloc);
void wcd_clsh_ctrl_free(struct wcd_clsh_ctrl *ctrl)
{
kfree(ctrl);
}
+EXPORT_SYMBOL_GPL(wcd_clsh_ctrl_free);
+
+MODULE_DESCRIPTION("WCD93XX Class-H driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
index 1fbb2c2fadb5..8565a530706d 100644
--- a/sound/soc/intel/avs/pcm.c
+++ b/sound/soc/intel/avs/pcm.c
@@ -796,6 +796,28 @@ static int avs_component_probe(struct snd_soc_component *component)
ret = avs_load_topology(component, filename);
kfree(filename);
+ if (ret == -ENOENT && !strncmp(mach->tplg_filename, "hda-", 4)) {
+ unsigned int vendor_id;
+
+ if (sscanf(mach->tplg_filename, "hda-%08x-tplg.bin", &vendor_id) != 1)
+ return ret;
+
+ if (((vendor_id >> 16) & 0xFFFF) == 0x8086)
+ mach->tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL,
+ "hda-8086-generic-tplg.bin");
+ else
+ mach->tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL,
+ "hda-generic-tplg.bin");
+
+ filename = kasprintf(GFP_KERNEL, "%s/%s", component->driver->topology_name_prefix,
+ mach->tplg_filename);
+ if (!filename)
+ return -ENOMEM;
+
+ dev_info(card->dev, "trying to load fallback topology %s\n", mach->tplg_filename);
+ ret = avs_load_topology(component, filename);
+ kfree(filename);
+ }
if (ret < 0)
return ret;
diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
index f18406dfa1e4..ba7c0ae82e00 100644
--- a/sound/soc/soc-component.c
+++ b/sound/soc/soc-component.c
@@ -1054,7 +1054,7 @@ int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream)
int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream,
int channel, unsigned long pos,
- struct iov_iter *buf, unsigned long bytes)
+ struct iov_iter *iter, unsigned long bytes)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_component *component;
@@ -1065,7 +1065,7 @@ int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream,
if (component->driver->copy)
return soc_component_ret(component,
component->driver->copy(component, substream,
- channel, pos, buf, bytes));
+ channel, pos, iter, bytes));
return -EINVAL;
}
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index ff2166525dbc..d0653d775c87 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -290,29 +290,29 @@ static snd_pcm_uframes_t dmaengine_pcm_pointer(
static int dmaengine_copy(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes)
+ struct iov_iter *iter, unsigned long bytes)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
int (*process)(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes) = pcm->config->process;
+ unsigned long bytes) = pcm->config->process;
bool is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
void *dma_ptr = runtime->dma_area + hwoff +
channel * (runtime->dma_bytes / runtime->channels);
if (is_playback)
- if (copy_from_iter(dma_ptr, bytes, buf) != bytes)
+ if (copy_from_iter(dma_ptr, bytes, iter) != bytes)
return -EFAULT;
if (process) {
- int ret = process(substream, channel, hwoff, buf, bytes);
+ int ret = process(substream, channel, hwoff, bytes);
if (ret < 0)
return ret;
}
if (!is_playback)
- if (copy_to_iter(dma_ptr, bytes, buf) != bytes)
+ if (copy_to_iter(dma_ptr, bytes, iter) != bytes)
return -EFAULT;
return 0;
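Editor's note: the series also drops the iov_iter argument from the dmaengine PCM ->process() callback (see the mchp-pdmc hunk above and the stm32 hunk below): by the time the hook runs, the data already sits in the DMA area, so the callback only post-processes that buffer in place. A sketch of a callback matching the new prototype; the masking is illustrative, in the spirit of what mchp-pdmc does with the channel-index bits.

    /* New-style process callback: operates on the DMA area, no iov_iter */
    static int example_pcm_process(struct snd_pcm_substream *substream,
                                   int channel, unsigned long hwoff,
                                   unsigned long bytes)
    {
        struct snd_pcm_runtime *runtime = substream->runtime;
        u32 *sample = (u32 *)(runtime->dma_area + hwoff +
                              channel * (runtime->dma_bytes / runtime->channels));
        u32 *end = sample + bytes / sizeof(*sample);

        for (; sample < end; sample++)
            *sample &= 0x00ffffff;  /* illustrative: strip control bits in the MSB */

        return 0;
    }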
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index f9b5d5969155..0acc848c1f00 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -1246,7 +1246,7 @@ static const struct snd_soc_dai_ops stm32_sai_pcm_dai_ops2 = {
static int stm32_sai_pcm_process_spdif(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes)
+ unsigned long bytes)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
diff --git a/sound/usb/midi2.c b/sound/usb/midi2.c
index a27e244650c8..1ec177fe284e 100644
--- a/sound/usb/midi2.c
+++ b/sound/usb/midi2.c
@@ -265,7 +265,7 @@ static void free_midi_urbs(struct snd_usb_midi2_endpoint *ep)
if (!ep)
return;
- for (i = 0; i < ep->num_urbs; ++i) {
+ for (i = 0; i < NUM_URBS; ++i) {
ctx = &ep->urbs[i];
if (!ctx->urb)
break;
@@ -279,6 +279,7 @@ static void free_midi_urbs(struct snd_usb_midi2_endpoint *ep)
}
/* allocate URBs for an EP */
+/* the callers should handle allocation errors via free_midi_urbs() */
static int alloc_midi_urbs(struct snd_usb_midi2_endpoint *ep)
{
struct snd_usb_midi2_urb *ctx;
@@ -351,8 +352,10 @@ static int snd_usb_midi_v2_open(struct snd_ump_endpoint *ump, int dir)
return -EIO;
if (ep->direction == STR_OUT) {
err = alloc_midi_urbs(ep);
- if (err)
+ if (err) {
+ free_midi_urbs(ep);
return err;
+ }
}
return 0;
}
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index 89430338a3d9..fac42486a8cf 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -117,6 +117,16 @@ $(OUTPUT)%.s: %.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_s_c)
+# bison and flex files are generated in the OUTPUT directory
+# so it needs a separate rule to depend on them properly
+$(OUTPUT)%-bison.o: $(OUTPUT)%-bison.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,$(host)cc_o_c)
+
+$(OUTPUT)%-flex.o: $(OUTPUT)%-flex.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,$(host)cc_o_c)
+
# Gather build data:
# obj-y - list of build objects
# subdir-y - list of directories to nest
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index f0c5de018a95..dad79ede4e0a 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -340,7 +340,7 @@ $(OUTPUT)test-jvmti-cmlr.bin:
$(BUILD)
$(OUTPUT)test-llvm.bin:
- $(BUILDXX) -std=gnu++14 \
+ $(BUILDXX) -std=gnu++17 \
-I$(shell $(LLVM_CONFIG) --includedir) \
-L$(shell $(LLVM_CONFIG) --libdir) \
$(shell $(LLVM_CONFIG) --libs Core BPF) \
@@ -348,17 +348,15 @@ $(OUTPUT)test-llvm.bin:
> $(@:.bin=.make.output) 2>&1
$(OUTPUT)test-llvm-version.bin:
- $(BUILDXX) -std=gnu++14 \
+ $(BUILDXX) -std=gnu++17 \
-I$(shell $(LLVM_CONFIG) --includedir) \
> $(@:.bin=.make.output) 2>&1
$(OUTPUT)test-clang.bin:
- $(BUILDXX) -std=gnu++14 \
+ $(BUILDXX) -std=gnu++17 \
-I$(shell $(LLVM_CONFIG) --includedir) \
-L$(shell $(LLVM_CONFIG) --libdir) \
- -Wl,--start-group -lclangBasic -lclangDriver \
- -lclangFrontend -lclangEdit -lclangLex \
- -lclangAST -Wl,--end-group \
+ -Wl,--start-group -lclang-cpp -Wl,--end-group \
$(shell $(LLVM_CONFIG) --libs Core option) \
$(shell $(LLVM_CONFIG) --system-libs) \
> $(@:.bin=.make.output) 2>&1
diff --git a/tools/build/feature/test-clang.cpp b/tools/build/feature/test-clang.cpp
deleted file mode 100644
index 7d87075cd1c5..000000000000
--- a/tools/build/feature/test-clang.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "clang/Basic/Version.h"
-#if CLANG_VERSION_MAJOR < 8
-#include "clang/Basic/VirtualFileSystem.h"
-#endif
-#include "clang/Driver/Driver.h"
-#include "clang/Frontend/TextDiagnosticPrinter.h"
-#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/Support/ManagedStatic.h"
-#if CLANG_VERSION_MAJOR >= 8
-#include "llvm/Support/VirtualFileSystem.h"
-#endif
-#include "llvm/Support/raw_ostream.h"
-
-using namespace clang;
-using namespace clang::driver;
-
-int main()
-{
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
-
- DiagnosticsEngine Diags(DiagID, &*DiagOpts);
- Driver TheDriver("test", "bpf-pc-linux", Diags);
-
- llvm::llvm_shutdown();
- return 0;
-}
diff --git a/tools/build/feature/test-cxx.cpp b/tools/build/feature/test-cxx.cpp
deleted file mode 100644
index 396aaedd2418..000000000000
--- a/tools/build/feature/test-cxx.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <iostream>
-#include <memory>
-
-static void print_str(std::string s)
-{
- std::cout << s << std::endl;
-}
-
-int main()
-{
- std::string s("Hello World!");
- print_str(std::move(s));
- std::cout << "|" << s << "|" << std::endl;
- return 0;
-}
diff --git a/tools/build/feature/test-llvm-version.cpp b/tools/build/feature/test-llvm-version.cpp
deleted file mode 100644
index 8a091625446a..000000000000
--- a/tools/build/feature/test-llvm-version.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <cstdio>
-#include "llvm/Config/llvm-config.h"
-
-#define NUM_VERSION (((LLVM_VERSION_MAJOR) << 16) + (LLVM_VERSION_MINOR << 8) + LLVM_VERSION_PATCH)
-#define pass int main() {printf("%x\n", NUM_VERSION); return 0;}
-
-#if NUM_VERSION >= 0x030900
-pass
-#else
-# error This LLVM is not tested yet.
-#endif
diff --git a/tools/build/feature/test-llvm.cpp b/tools/build/feature/test-llvm.cpp
deleted file mode 100644
index 88a3d1bdd9f6..000000000000
--- a/tools/build/feature/test-llvm.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/raw_ostream.h"
-#define NUM_VERSION (((LLVM_VERSION_MAJOR) << 16) + (LLVM_VERSION_MINOR << 8) + LLVM_VERSION_PATCH)
-
-#if NUM_VERSION < 0x030900
-# error "LLVM version too low"
-#endif
-int main()
-{
- llvm::errs() << "Hello World!\n";
- llvm::llvm_shutdown();
- return 0;
-}
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index ba2dcf64f4e6..ae64090184d3 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -148,8 +148,18 @@ struct perf_record_switch {
struct perf_record_header_attr {
struct perf_event_header header;
struct perf_event_attr attr;
- __u64 id[];
-};
+ /*
+ * An array of u64 ids follows here, but we cannot use a flexible array
+ * because the size of attr in the data can differ from the current
+ * version. Please use perf_record_header_attr_id() below.
+ *
+ * __u64 id[]; // do not use this
+ */
+};
+
+/* Returns the pointer to id array based on the actual attr size. */
+#define perf_record_header_attr_id(evt) \
+ ((void *)&(evt)->attr.attr + (evt)->attr.attr.size)
enum {
PERF_CPU_MAP__CPUS = 0,
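Editor's note: since perf_event_attr can grow between versions, the id[] values in a PERF_RECORD_HEADER_ATTR record start attr.size bytes into the attr rather than at sizeof(attr), which is why a flexible array member would lie; perf_record_header_attr_id() recovers the real position from the size stored in the event itself. A sketch of how a reader can walk the ids, assuming the event comes from a perf.data stream and that libperf's header is reachable as <perf/event.h>:

    #include <stdio.h>
    #include <perf/event.h>

    /* Sketch: print the ids that follow a variable-sized perf_event_attr */
    static void list_attr_ids(union perf_event *event) /* PERF_RECORD_HEADER_ATTR */
    {
        __u64 *ids = perf_record_header_attr_id(event);
        size_t nr = (event->header.size - sizeof(event->header) -
                     event->attr.attr.size) / sizeof(__u64);
        size_t i;

        for (i = 0; i < nr; i++)
            printf("id[%zu] = %llu\n", i, (unsigned long long)ids[i]);
    }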
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index f04f0eaded98..ca5789625cd2 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -67,6 +67,9 @@ SUBSYSTEM
'internals'::
Benchmark internal perf functionality.
+'uprobe'::
+ Benchmark overhead of uprobe + BPF.
+
'all'::
All benchmark subsystems.
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 1478068ad5dd..0b4e79dbd3f6 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -125,9 +125,6 @@ Given a $HOME/.perfconfig like this:
group = true
skip-empty = true
- [llvm]
- dump-obj = true
- clang-opt = -g
You can hide source code of annotate feature setting the config to false with
@@ -657,36 +654,6 @@ ftrace.*::
-F option is not specified. Possible values are 'function' and
'function_graph'.
-llvm.*::
- llvm.clang-path::
- Path to clang. If omit, search it from $PATH.
-
- llvm.clang-bpf-cmd-template::
- Cmdline template. Below lines show its default value. Environment
- variable is used to pass options.
- "$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\
- "-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \
- "$CLANG_OPTIONS $PERF_BPF_INC_OPTIONS $KERNEL_INC_OPTIONS " \
- "-Wno-unused-value -Wno-pointer-sign " \
- "-working-directory $WORKING_DIR " \
- "-c \"$CLANG_SOURCE\" --target=bpf $CLANG_EMIT_LLVM -O2 -o - $LLVM_OPTIONS_PIPE"
-
- llvm.clang-opt::
- Options passed to clang.
-
- llvm.kbuild-dir::
- kbuild directory. If not set, use /lib/modules/`uname -r`/build.
- If set to "" deliberately, skip kernel header auto-detector.
-
- llvm.kbuild-opts::
- Options passed to 'make' when detecting kernel header options.
-
- llvm.dump-obj::
- Enable perf dump BPF object files compiled by LLVM.
-
- llvm.opts::
- Options passed to llc.
-
samples.*::
samples.context::
diff --git a/tools/perf/Documentation/perf-dlfilter.txt b/tools/perf/Documentation/perf-dlfilter.txt
index fb22e3b31dc5..8887cc20a809 100644
--- a/tools/perf/Documentation/perf-dlfilter.txt
+++ b/tools/perf/Documentation/perf-dlfilter.txt
@@ -64,6 +64,12 @@ internal filtering.
If implemented, 'filter_description' should return a one-line description
of the filter, and optionally a longer description.
+Do not assume the 'sample' argument is valid (dereferenceable)
+after 'filter_event' and 'filter_event_early' return.
+
+Do not assume data referenced by pointers in struct perf_dlfilter_sample
+is valid (dereferenceable) after 'filter_event' and 'filter_event_early' return.
+
The perf_dlfilter_sample structure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -150,7 +156,8 @@ struct perf_dlfilter_fns {
const char *(*srcline)(void *ctx, __u32 *line_number);
struct perf_event_attr *(*attr)(void *ctx);
__s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
- void *(*reserved[120])(void *);
+ void (*al_cleanup)(void *ctx, struct perf_dlfilter_al *al);
+ void *(*reserved[119])(void *);
};
----
@@ -161,7 +168,8 @@ struct perf_dlfilter_fns {
'args' returns arguments from --dlarg options.
'resolve_address' provides information about 'address'. al->size must be set
-before calling. Returns 0 on success, -1 otherwise.
+before calling. Returns 0 on success, -1 otherwise. Call al_cleanup() (if present,
+see below) when 'al' data is no longer needed.
'insn' returns instruction bytes and length.
@@ -171,6 +179,12 @@ before calling. Returns 0 on success, -1 otherwise.
'object_code' reads object code and returns the number of bytes read.
+'al_cleanup' must be called (if present, so check perf_dlfilter_fns.al_cleanup != NULL)
+after resolve_address() to free any associated resources.
+
+Do not assume pointers obtained via perf_dlfilter_fns are valid (dereferenceable)
+after 'filter_event' and 'filter_event_early' return.
+
The perf_dlfilter_al structure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -197,9 +211,13 @@ struct perf_dlfilter_al {
/* Below members are only populated by resolve_ip() */
__u8 filtered; /* true if this sample event will be filtered out */
const char *comm;
+ void *priv; /* Private data. Do not change */
};
----
+Do not assume data referenced by pointers in struct perf_dlfilter_al
+is valid (dereferenceable) after 'filter_event' and 'filter_event_early' return.
+
perf_dlfilter_sample flags
~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/tools/perf/Documentation/perf-ftrace.txt b/tools/perf/Documentation/perf-ftrace.txt
index df4595563801..d780b93fcf87 100644
--- a/tools/perf/Documentation/perf-ftrace.txt
+++ b/tools/perf/Documentation/perf-ftrace.txt
@@ -96,8 +96,9 @@ OPTIONS for 'perf ftrace trace'
--func-opts::
List of options allowed to set:
- call-graph - Display kernel stack trace for function tracer.
- irq-info - Display irq context info for function tracer.
+
+ - call-graph - Display kernel stack trace for function tracer.
+ - irq-info - Display irq context info for function tracer.
-G::
--graph-funcs=::
@@ -118,11 +119,12 @@ OPTIONS for 'perf ftrace trace'
--graph-opts::
List of options allowed to set:
- nosleep-time - Measure on-CPU time only for function_graph tracer.
- noirqs - Ignore functions that happen inside interrupt.
- verbose - Show process names, PIDs, timestamps, etc.
- thresh=<n> - Setup trace duration threshold in microseconds.
- depth=<n> - Set max depth for function graph tracer to follow.
+
+ - nosleep-time - Measure on-CPU time only for function_graph tracer.
+ - noirqs - Ignore functions that happen inside interrupt.
+ - verbose - Show process names, PIDs, timestamps, etc.
+ - thresh=<n> - Setup trace duration threshold in microseconds.
+ - depth=<n> - Set max depth for function graph tracer to follow.
OPTIONS for 'perf ftrace latency'
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 680396c56bd1..d5217be012d7 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -99,20 +99,6 @@ OPTIONS
If you want to profile write accesses in [0x1000~1008), just set
'mem:0x1000/8:w'.
- - a BPF source file (ending in .c) or a precompiled object file (ending
- in .o) selects one or more BPF events.
- The BPF program can attach to various perf events based on the ELF section
- names.
-
- When processing a '.c' file, perf searches an installed LLVM to compile it
- into an object file first. Optional clang options can be passed via the
- '--clang-opt' command line option, e.g.:
-
- perf record --clang-opt "-DLINUX_VERSION_CODE=0x50000" \
- -e tests/bpf-script-example.c
-
- Note: '--clang-opt' must be placed before '--event/-e'.
-
- a group of events surrounded by a pair of brace ("{event1,event2,...}").
Each event is separated by commas and the group should be quoted to
prevent the shell interpretation. You also need to use --group on
@@ -523,9 +509,10 @@ CLOCK_BOOTTIME, CLOCK_REALTIME and CLOCK_TAI.
Select AUX area tracing Snapshot Mode. This option is valid only with an
AUX area tracing event. Optionally, certain snapshot capturing parameters
can be specified in a string that follows this option:
- 'e': take one last snapshot on exit; guarantees that there is at least one
+
+ - 'e': take one last snapshot on exit; guarantees that there is at least one
snapshot in the output file;
- <size>: if the PMU supports this, specify the desired snapshot size.
+ - <size>: if the PMU supports this, specify the desired snapshot size.
In Snapshot Mode trace data is captured only when signal SIGUSR2 is received
and on exit if the above 'e' option is given.
@@ -547,14 +534,6 @@ PERF_RECORD_SWITCH_CPU_WIDE. In some cases (e.g. Intel PT, CoreSight or Arm SPE)
switch events will be enabled automatically, which can be suppressed by
by the option --no-switch-events.
---clang-path=PATH::
-Path to clang binary to use for compiling BPF scriptlets.
-(enabled when BPF support is on)
-
---clang-opt=OPTIONS::
-Options passed to clang when compiling BPF scriptlets.
-(enabled when BPF support is on)
-
--vmlinux=PATH::
Specify vmlinux path which has debuginfo.
(enabled when BPF prologue is on)
@@ -572,8 +551,9 @@ providing implementation for Posix AIO API.
--affinity=mode::
Set affinity mask of trace reading thread according to the policy defined by 'mode' value:
- node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
- cpu - thread affinity mask is set to cpu of the processed mmap buffer
+
+ - node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
+ - cpu - thread affinity mask is set to cpu of the processed mmap buffer
--mmap-flush=number::
@@ -625,16 +605,17 @@ Record timestamp boundary (time of first/last samples).
--switch-output[=mode]::
Generate multiple perf.data files, timestamp prefixed, switching to a new one
based on 'mode' value:
- "signal" - when receiving a SIGUSR2 (default value) or
- <size> - when reaching the size threshold, size is expected to
- be a number with appended unit character - B/K/M/G
- <time> - when reaching the time threshold, size is expected to
- be a number with appended unit character - s/m/h/d
- Note: the precision of the size threshold hugely depends
- on your configuration - the number and size of your ring
- buffers (-m). It is generally more precise for higher sizes
- (like >5M), for lower values expect different sizes.
+ - "signal" - when receiving a SIGUSR2 (default value) or
+ - <size> - when reaching the size threshold, size is expected to
+ be a number with appended unit character - B/K/M/G
+ - <time> - when reaching the time threshold, size is expected to
+ be a number with appended unit character - s/m/h/d
+
+ Note: the precision of the size threshold hugely depends
+ on your configuration - the number and size of your ring
+ buffers (-m). It is generally more precise for higher sizes
+ (like >5M), for lower values expect different sizes.
A possible use case is to, given an external event, slice the perf.data file
that gets then processed, possibly via a perf script, to decide if that
@@ -680,11 +661,12 @@ choice in this option. For example, --synth=no would have MMAP events for
kernel and modules.
Available types are:
- 'task' - synthesize FORK and COMM events for each task
- 'mmap' - synthesize MMAP events for each process (implies 'task')
- 'cgroup' - synthesize CGROUP events for each cgroup
- 'all' - synthesize all events (default)
- 'no' - do not synthesize any of the above events
+
+ - 'task' - synthesize FORK and COMM events for each task
+ - 'mmap' - synthesize MMAP events for each process (implies 'task')
+ - 'cgroup' - synthesize CGROUP events for each cgroup
+ - 'all' - synthesize all events (default)
+ - 'no' - do not synthesize any of the above events
--tail-synthesize::
Instead of collecting non-sample events (for example, fork, comm, mmap) at
@@ -736,18 +718,19 @@ ctl-fifo / ack-fifo are opened and used as ctl-fd / ack-fd as follows.
Listen on ctl-fd descriptor for command to control measurement.
Available commands:
- 'enable' : enable events
- 'disable' : disable events
- 'enable name' : enable event 'name'
- 'disable name' : disable event 'name'
- 'snapshot' : AUX area tracing snapshot).
- 'stop' : stop perf record
- 'ping' : ping
-
- 'evlist [-v|-g|-F] : display all events
- -F Show just the sample frequency used for each event.
- -v Show all fields.
- -g Show event group information.
+
+ - 'enable' : enable events
+ - 'disable' : disable events
+ - 'enable name' : enable event 'name'
+ - 'disable name' : disable event 'name'
+ - 'snapshot' : AUX area tracing snapshot.
+ - 'stop' : stop perf record
+ - 'ping' : ping
+ - 'evlist [-v|-g|-F] : display all events
+
+ -F Show just the sample frequency used for each event.
+ -v Show all fields.
+ -g Show event group information.
Measurements can be started with events disabled using --delay=-1 option. Optionally
send control command completion ('ack\n') to ack-fd descriptor to synchronize with the
@@ -808,10 +791,10 @@ the second monitors CPUs 1 and 5-7 with the affinity mask 5-7.
<spec> value can also be a string meaning predefined parallel threads
layout:
- cpu - create new data streaming thread for every monitored cpu
- core - create new thread to monitor CPUs grouped by a core
- package - create new thread to monitor CPUs grouped by a package
- numa - create new threed to monitor CPUs grouped by a NUMA domain
+ - cpu - create new data streaming thread for every monitored cpu
+ - core - create new thread to monitor CPUs grouped by a core
+ - package - create new thread to monitor CPUs grouped by a package
+ - numa - create new thread to monitor CPUs grouped by a NUMA domain
Predefined layouts can be used on systems with large number of CPUs in
order not to spawn multiple per-cpu streaming threads but still avoid LOST
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index 635ba043fd7d..010a4edcd384 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -43,7 +43,7 @@ struct perf_file_section {
Flags section:
-For each of the optional features a perf_file_section it placed after the data
+For each of the optional features a perf_file_section is placed after the data
section if the feature bit is set in the perf_header flags bitset. The
respective perf_file_section points to the data of the additional header and
defines its size.
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index c5db0de49868..d66b52407e19 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -246,6 +246,9 @@ ifeq ($(CC_NO_CLANG), 0)
else
CORE_CFLAGS += -O6
endif
+else
+ CORE_CFLAGS += -g
+ CXXFLAGS += -g
endif
ifdef PARSER_DEBUG
@@ -256,6 +259,11 @@ ifdef PARSER_DEBUG
$(call detected_var,PARSER_DEBUG_FLEX)
endif
+ifdef LTO
+ CORE_CFLAGS += -flto
+ CXXFLAGS += -flto
+endif
+
# Try different combinations to accommodate systems that only have
# python[2][3]-config in weird combinations in the following order of
# priority from lowest to highest:
@@ -319,18 +327,14 @@ FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
FEATURE_CHECK_LDFLAGS-disassembler-init-styled = -lbfd -lopcodes -ldl
CORE_CFLAGS += -fno-omit-frame-pointer
-CORE_CFLAGS += -ggdb3
-CORE_CFLAGS += -funwind-tables
CORE_CFLAGS += -Wall
CORE_CFLAGS += -Wextra
CORE_CFLAGS += -std=gnu11
-CXXFLAGS += -std=gnu++14 -fno-exceptions -fno-rtti
+CXXFLAGS += -std=gnu++17 -fno-exceptions -fno-rtti
CXXFLAGS += -Wall
+CXXFLAGS += -Wextra
CXXFLAGS += -fno-omit-frame-pointer
-CXXFLAGS += -ggdb3
-CXXFLAGS += -funwind-tables
-CXXFLAGS += -Wno-strict-aliasing
HOSTCFLAGS += -Wall
HOSTCFLAGS += -Wextra
@@ -585,18 +589,6 @@ ifndef NO_LIBELF
LIBBPF_STATIC := 1
endif
endif
-
- ifndef NO_DWARF
- ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
- CFLAGS += -DHAVE_BPF_PROLOGUE
- $(call detected,CONFIG_BPF_PROLOGUE)
- else
- msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset());
- endif
- else
- msg := $(warning DWARF support is off, BPF prologue is disabled);
- endif
-
endif # NO_LIBBPF
endif # NO_LIBELF
@@ -1123,37 +1115,6 @@ ifndef NO_JVMTI
endif
endif
-USE_CXX = 0
-USE_CLANGLLVM = 0
-ifdef LIBCLANGLLVM
- $(call feature_check,cxx)
- ifneq ($(feature-cxx), 1)
- msg := $(warning No g++ found, disable clang and llvm support. Please install g++)
- else
- $(call feature_check,llvm)
- $(call feature_check,llvm-version)
- ifneq ($(feature-llvm), 1)
- msg := $(warning No suitable libLLVM found, disabling builtin clang and LLVM support. Please install llvm-dev(el) (>= 3.9.0))
- else
- $(call feature_check,clang)
- ifneq ($(feature-clang), 1)
- msg := $(warning No suitable libclang found, disabling builtin clang and LLVM support. Please install libclang-dev(el) (>= 3.9.0))
- else
- CFLAGS += -DHAVE_LIBCLANGLLVM_SUPPORT
- CXXFLAGS += -DHAVE_LIBCLANGLLVM_SUPPORT -I$(shell $(LLVM_CONFIG) --includedir)
- $(call detected,CONFIG_CXX)
- $(call detected,CONFIG_CLANGLLVM)
- USE_CXX = 1
- USE_LLVM = 1
- USE_CLANG = 1
- ifneq ($(feature-llvm-version),1)
- msg := $(warning This version of LLVM is not tested. May cause build errors)
- endif
- endif
- endif
- endif
-endif
-
ifndef NO_LIBPFM4
$(call feature_check,libpfm4)
ifeq ($(feature-libpfm4), 1)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 097316ef38e6..37af6df7b978 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -99,10 +99,6 @@ include ../scripts/utilities.mak
# Define NO_JVMTI_CMLR (debug only) if you do not want to process CMLR
# data for java source lines.
#
-# Define LIBCLANGLLVM if you DO want builtin clang and llvm support.
-# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
-# llvm-config is not in $PATH.
-#
# Define CORESIGHT if you DO WANT support for CoreSight trace decoding.
#
# Define NO_AIO if you do not want support of Posix AIO based trace
@@ -381,7 +377,7 @@ ifndef NO_JVMTI
PROGRAMS += $(OUTPUT)$(LIBJVMTI)
endif
-DLFILTERS := dlfilter-test-api-v0.so dlfilter-show-cycles.so
+DLFILTERS := dlfilter-test-api-v0.so dlfilter-test-api-v2.so dlfilter-show-cycles.so
DLFILTERS := $(patsubst %,$(OUTPUT)dlfilters/%,$(DLFILTERS))
# what 'all' will build and 'install' will install, in perfexecdir
@@ -425,22 +421,6 @@ endif
EXTLIBS := $(call filter-out,$(EXCLUDE_EXTLIBS),$(EXTLIBS))
LIBS = -Wl,--whole-archive $(PERFLIBS) $(EXTRA_PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
-ifeq ($(USE_CLANG), 1)
- CLANGLIBS_LIST = AST Basic CodeGen Driver Frontend Lex Tooling Edit Sema Analysis Parse Serialization
- CLANGLIBS_NOEXT_LIST = $(foreach l,$(CLANGLIBS_LIST),$(shell $(LLVM_CONFIG) --libdir)/libclang$(l))
- LIBCLANG = $(foreach l,$(CLANGLIBS_NOEXT_LIST),$(wildcard $(l).a $(l).so))
- LIBS += -Wl,--start-group $(LIBCLANG) -Wl,--end-group
-endif
-
-ifeq ($(USE_LLVM), 1)
- LIBLLVM = $(shell $(LLVM_CONFIG) --libs all) $(shell $(LLVM_CONFIG) --system-libs)
- LIBS += -L$(shell $(LLVM_CONFIG) --libdir) $(LIBLLVM)
-endif
-
-ifeq ($(USE_CXX), 1)
- LIBS += -lstdc++
-endif
-
export INSTALL SHELL_PATH
### Build rules
@@ -978,11 +958,6 @@ ifndef NO_JVMTI
endif
$(call QUIET_INSTALL, libexec) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
-ifndef NO_LIBBPF
- $(call QUIET_INSTALL, bpf-examples) \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
- $(INSTALL) examples/bpf/*.c -m 644 -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
-endif
$(call QUIET_INSTALL, perf-archive) \
$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(call QUIET_INSTALL, perf-iostat) \
@@ -1057,6 +1032,8 @@ SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h
SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h
SKELETONS += $(SKEL_OUT)/off_cpu.skel.h $(SKEL_OUT)/lock_contention.skel.h
SKELETONS += $(SKEL_OUT)/kwork_trace.skel.h $(SKEL_OUT)/sample_filter.skel.h
+SKELETONS += $(SKEL_OUT)/bench_uprobe.skel.h
+SKELETONS += $(SKEL_OUT)/augmented_raw_syscalls.skel.h
$(SKEL_TMP_OUT) $(LIBAPI_OUTPUT) $(LIBBPF_OUTPUT) $(LIBPERF_OUTPUT) $(LIBSUBCMD_OUTPUT) $(LIBSYMBOL_OUTPUT):
$(Q)$(MKDIR) -p $@
@@ -1079,10 +1056,15 @@ ifneq ($(CROSS_COMPILE),)
CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
endif
+CLANG_OPTIONS = -Wall
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
BPF_INCLUDE := -I$(SKEL_TMP_OUT)/.. -I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES)
TOOLS_UAPI_INCLUDE := -I$(srctree)/tools/include/uapi
+ifneq ($(WERROR),0)
+ CLANG_OPTIONS += -Werror
+endif
+
$(BPFTOOL): | $(SKEL_TMP_OUT)
$(Q)CFLAGS= $(MAKE) -C ../bpf/bpftool \
OUTPUT=$(SKEL_TMP_OUT)/ bootstrap
@@ -1124,7 +1106,7 @@ else
endif
$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) $(SKEL_OUT)/vmlinux.h | $(SKEL_TMP_OUT)
- $(QUIET_CLANG)$(CLANG) -g -O2 --target=bpf -Wall -Werror $(BPF_INCLUDE) $(TOOLS_UAPI_INCLUDE) \
+ $(QUIET_CLANG)$(CLANG) -g -O2 --target=bpf $(CLANG_OPTIONS) $(BPF_INCLUDE) $(TOOLS_UAPI_INCLUDE) \
-c $(filter util/bpf_skel/%.bpf.c,$^) -o $@
$(SKEL_OUT)/%.skel.h: $(SKEL_TMP_OUT)/%.bpf.o | $(BPFTOOL)
diff --git a/tools/perf/arch/arm/include/perf_regs.h b/tools/perf/arch/arm/include/perf_regs.h
index 99a06550e25d..75ce1c370114 100644
--- a/tools/perf/arch/arm/include/perf_regs.h
+++ b/tools/perf/arch/arm/include/perf_regs.h
@@ -12,7 +12,4 @@ void perf_regs_load(u64 *regs);
#define PERF_REGS_MAX PERF_REG_ARM_MAX
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
-#define PERF_REG_IP PERF_REG_ARM_PC
-#define PERF_REG_SP PERF_REG_ARM_SP
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 7c51fa182b51..b8d6a953fd74 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -79,9 +79,9 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr,
int err;
u32 val;
u64 contextid = evsel->core.attr.config &
- (perf_pmu__format_bits(&cs_etm_pmu->format, "contextid") |
- perf_pmu__format_bits(&cs_etm_pmu->format, "contextid1") |
- perf_pmu__format_bits(&cs_etm_pmu->format, "contextid2"));
+ (perf_pmu__format_bits(cs_etm_pmu, "contextid") |
+ perf_pmu__format_bits(cs_etm_pmu, "contextid1") |
+ perf_pmu__format_bits(cs_etm_pmu, "contextid2"));
if (!contextid)
return 0;
@@ -106,7 +106,7 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr,
}
if (contextid &
- perf_pmu__format_bits(&cs_etm_pmu->format, "contextid1")) {
+ perf_pmu__format_bits(cs_etm_pmu, "contextid1")) {
/*
* TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID
* tracing is supported:
@@ -122,7 +122,7 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr,
}
if (contextid &
- perf_pmu__format_bits(&cs_etm_pmu->format, "contextid2")) {
+ perf_pmu__format_bits(cs_etm_pmu, "contextid2")) {
/*
* TRCIDR2.VMIDOPT[30:29] != 0 and
* TRCIDR2.VMIDSIZE[14:10] == 0b00100 (32bit virtual contextid)
@@ -151,7 +151,7 @@ static int cs_etm_validate_timestamp(struct auxtrace_record *itr,
u32 val;
if (!(evsel->core.attr.config &
- perf_pmu__format_bits(&cs_etm_pmu->format, "timestamp")))
+ perf_pmu__format_bits(cs_etm_pmu, "timestamp")))
return 0;
if (!cs_etm_is_etmv4(itr, cpu)) {
diff --git a/tools/perf/arch/arm/util/perf_regs.c b/tools/perf/arch/arm/util/perf_regs.c
index 2833e101a7c6..2c56e8b56ddf 100644
--- a/tools/perf/arch/arm/util/perf_regs.c
+++ b/tools/perf/arch/arm/util/perf_regs.c
@@ -1,6 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
+#include "perf_regs.h"
#include "../../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
};
+
+uint64_t arch__intr_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
+
+uint64_t arch__user_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
diff --git a/tools/perf/arch/arm/util/unwind-libdw.c b/tools/perf/arch/arm/util/unwind-libdw.c
index 1834a0cd9ce3..4e02cef461e3 100644
--- a/tools/perf/arch/arm/util/unwind-libdw.c
+++ b/tools/perf/arch/arm/util/unwind-libdw.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
+#include "perf_regs.h"
#include "../../../util/unwind-libdw.h"
#include "../../../util/perf_regs.h"
#include "../../../util/sample.h"
diff --git a/tools/perf/arch/arm64/include/arch-tests.h b/tools/perf/arch/arm64/include/arch-tests.h
index 452b3d904521..474d7cf5afbd 100644
--- a/tools/perf/arch/arm64/include/arch-tests.h
+++ b/tools/perf/arch/arm64/include/arch-tests.h
@@ -2,6 +2,9 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
+struct test_suite;
+
+int test__cpuid_match(struct test_suite *test, int subtest);
extern struct test_suite *arch_tests[];
#endif
diff --git a/tools/perf/arch/arm64/include/perf_regs.h b/tools/perf/arch/arm64/include/perf_regs.h
index 35a3cc775b39..58639ee9f7ea 100644
--- a/tools/perf/arch/arm64/include/perf_regs.h
+++ b/tools/perf/arch/arm64/include/perf_regs.h
@@ -14,7 +14,4 @@ void perf_regs_load(u64 *regs);
#define PERF_REGS_MAX PERF_REG_ARM64_MAX
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
-#define PERF_REG_IP PERF_REG_ARM64_PC
-#define PERF_REG_SP PERF_REG_ARM64_SP
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/arm64/tests/Build b/tools/perf/arch/arm64/tests/Build
index a61c06bdb757..e337c09e7f56 100644
--- a/tools/perf/arch/arm64/tests/Build
+++ b/tools/perf/arch/arm64/tests/Build
@@ -2,3 +2,4 @@ perf-y += regs_load.o
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
perf-y += arch-tests.o
+perf-y += cpuid-match.o
diff --git a/tools/perf/arch/arm64/tests/arch-tests.c b/tools/perf/arch/arm64/tests/arch-tests.c
index ad16b4f8f63e..74932e72c727 100644
--- a/tools/perf/arch/arm64/tests/arch-tests.c
+++ b/tools/perf/arch/arm64/tests/arch-tests.c
@@ -3,9 +3,13 @@
#include "tests/tests.h"
#include "arch-tests.h"
+
+DEFINE_SUITE("arm64 CPUID matching", cpuid_match);
+
struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
&suite__dwarf_unwind,
#endif
+ &suite__cpuid_match,
NULL,
};
diff --git a/tools/perf/arch/arm64/tests/cpuid-match.c b/tools/perf/arch/arm64/tests/cpuid-match.c
new file mode 100644
index 000000000000..e8e3947cca18
--- /dev/null
+++ b/tools/perf/arch/arm64/tests/cpuid-match.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+
+#include "arch-tests.h"
+#include "tests/tests.h"
+#include "util/header.h"
+
+int test__cpuid_match(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ /* midr with no leading zeros matches */
+ if (strcmp_cpuid_str("0x410fd0c0", "0x00000000410fd0c0"))
+ return -1;
+ /* Upper case matches */
+ if (strcmp_cpuid_str("0x410fd0c0", "0x00000000410FD0C0"))
+ return -1;
+ /* r0p0 = r0p0 matches */
+ if (strcmp_cpuid_str("0x00000000410fd480", "0x00000000410fd480"))
+ return -1;
+ /* r0p1 > r0p0 matches */
+ if (strcmp_cpuid_str("0x00000000410fd480", "0x00000000410fd481"))
+ return -1;
+	/* r1p0 > r0p0 matches */
+ if (strcmp_cpuid_str("0x00000000410fd480", "0x00000000411fd480"))
+ return -1;
+ /* r0p0 < r0p1 doesn't match */
+ if (!strcmp_cpuid_str("0x00000000410fd481", "0x00000000410fd480"))
+ return -1;
+ /* r0p0 < r1p0 doesn't match */
+ if (!strcmp_cpuid_str("0x00000000411fd480", "0x00000000410fd480"))
+ return -1;
+ /* Different CPU doesn't match */
+ if (!strcmp_cpuid_str("0x00000000410fd4c0", "0x00000000430f0af0"))
+ return -1;
+
+ return 0;
+}
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 3b1676ff03f9..9cc3d6dcb849 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -230,7 +230,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
* inform that the resulting output's SPE samples contain physical addresses
* where applicable.
*/
- bit = perf_pmu__format_bits(&arm_spe_pmu->format, "pa_enable");
+ bit = perf_pmu__format_bits(arm_spe_pmu, "pa_enable");
if (arm_spe_evsel->core.attr.config & bit)
evsel__set_sample_bit(arm_spe_evsel, PHYS_ADDR);
diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c
index 80b9f6287fe2..a2eef9ec5491 100644
--- a/tools/perf/arch/arm64/util/header.c
+++ b/tools/perf/arch/arm64/util/header.c
@@ -1,3 +1,6 @@
+#include <linux/kernel.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
#include <stdio.h>
#include <stdlib.h>
#include <perf/cpumap.h>
@@ -10,15 +13,14 @@
#define MIDR "/regs/identification/midr_el1"
#define MIDR_SIZE 19
-#define MIDR_REVISION_MASK 0xf
-#define MIDR_VARIANT_SHIFT 20
-#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT)
+#define MIDR_REVISION_MASK GENMASK(3, 0)
+#define MIDR_VARIANT_MASK GENMASK(23, 20)
static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
{
const char *sysfs = sysfs__mountpoint();
- u64 midr = 0;
int cpu;
+ int ret = EINVAL;
if (!sysfs || sz < MIDR_SIZE)
return EINVAL;
@@ -44,22 +46,13 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
}
fclose(file);
- /* Ignore/clear Variant[23:20] and
- * Revision[3:0] of MIDR
- */
- midr = strtoul(buf, NULL, 16);
- midr &= (~(MIDR_VARIANT_MASK | MIDR_REVISION_MASK));
- scnprintf(buf, MIDR_SIZE, "0x%016lx", midr);
/* got midr break loop */
+ ret = 0;
break;
}
perf_cpu_map__put(cpus);
-
- if (!midr)
- return EINVAL;
-
- return 0;
+ return ret;
}
int get_cpuid(char *buf, size_t sz)
@@ -99,3 +92,47 @@ char *get_cpuid_str(struct perf_pmu *pmu)
return buf;
}
+
+/*
+ * Return 0 if idstr is a higher or equal version of the same part as
+ * mapcpuid. Therefore, if mapcpuid has 0 for revision and variant then any
+ * version of idstr will match as long as it's the same CPU type.
+ *
+ * Return 1 if the CPU type is different or the version of idstr is lower.
+ */
+int strcmp_cpuid_str(const char *mapcpuid, const char *idstr)
+{
+ u64 map_id = strtoull(mapcpuid, NULL, 16);
+ char map_id_variant = FIELD_GET(MIDR_VARIANT_MASK, map_id);
+ char map_id_revision = FIELD_GET(MIDR_REVISION_MASK, map_id);
+ u64 id = strtoull(idstr, NULL, 16);
+ char id_variant = FIELD_GET(MIDR_VARIANT_MASK, id);
+ char id_revision = FIELD_GET(MIDR_REVISION_MASK, id);
+ u64 id_fields = ~(MIDR_VARIANT_MASK | MIDR_REVISION_MASK);
+
+ /* Compare without version first */
+ if ((map_id & id_fields) != (id & id_fields))
+ return 1;
+
+ /*
+ * ID matches, now compare version.
+ *
+ * Arm revisions (like r0p0) are compared here like two digit semver
+ * values eg. 1.3 < 2.0 < 2.1 < 2.2.
+ *
+ * r = high value = 'Variant' field in MIDR
+ * p = low value = 'Revision' field in MIDR
+ *
+ */
+ if (id_variant > map_id_variant)
+ return 0;
+
+ if (id_variant == map_id_variant && id_revision >= map_id_revision)
+ return 0;
+
+ /*
+ * variant is less than mapfile variant or variants are the same but
+ * the revision doesn't match. Return no match.
+ */
+ return 1;
+}
diff --git a/tools/perf/arch/arm64/util/machine.c b/tools/perf/arch/arm64/util/machine.c
index 235a0a1e1ec7..ba1144366e85 100644
--- a/tools/perf/arch/arm64/util/machine.c
+++ b/tools/perf/arch/arm64/util/machine.c
@@ -6,6 +6,7 @@
#include "debug.h"
#include "symbol.h"
#include "callchain.h"
+#include "perf_regs.h"
#include "record.h"
#include "util/perf_regs.h"
diff --git a/tools/perf/arch/arm64/util/mem-events.c b/tools/perf/arch/arm64/util/mem-events.c
index df817d1f9f3e..3bcc5c7035c2 100644
--- a/tools/perf/arch/arm64/util/mem-events.c
+++ b/tools/perf/arch/arm64/util/mem-events.c
@@ -20,7 +20,7 @@ struct perf_mem_event *perf_mem_events__ptr(int i)
return &perf_mem_events[i];
}
-char *perf_mem_events__name(int i, char *pmu_name __maybe_unused)
+const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
diff --git a/tools/perf/arch/arm64/util/perf_regs.c b/tools/perf/arch/arm64/util/perf_regs.c
index 006692c9b040..1b79d8eab22f 100644
--- a/tools/perf/arch/arm64/util/perf_regs.c
+++ b/tools/perf/arch/arm64/util/perf_regs.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/zalloc.h>
+#include "perf_regs.h"
#include "../../../perf-sys.h"
#include "../../../util/debug.h"
#include "../../../util/event.h"
@@ -139,6 +140,11 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
return SDT_ARG_VALID;
}
+uint64_t arch__intr_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
+
uint64_t arch__user_reg_mask(void)
{
struct perf_event_attr attr = {
diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c
index 512a8f13c4de..615084eb88d8 100644
--- a/tools/perf/arch/arm64/util/pmu.c
+++ b/tools/perf/arch/arm64/util/pmu.c
@@ -2,28 +2,12 @@
#include <internal/cpumap.h>
#include "../../../util/cpumap.h"
+#include "../../../util/header.h"
#include "../../../util/pmu.h"
#include "../../../util/pmus.h"
#include <api/fs/fs.h>
#include <math.h>
-static struct perf_pmu *pmu__find_core_pmu(void)
-{
- struct perf_pmu *pmu = NULL;
-
- while ((pmu = perf_pmus__scan_core(pmu))) {
- /*
- * The cpumap should cover all CPUs. Otherwise, some CPUs may
- * not support some events or have different event IDs.
- */
- if (RC_CHK_ACCESS(pmu->cpus)->nr != cpu__max_cpu().cpu)
- return NULL;
-
- return pmu;
- }
- return NULL;
-}
-
const struct pmu_metrics_table *pmu_metrics_table__find(void)
{
struct perf_pmu *pmu = pmu__find_core_pmu();
diff --git a/tools/perf/arch/arm64/util/unwind-libdw.c b/tools/perf/arch/arm64/util/unwind-libdw.c
index 09385081bb03..e056d50ab42e 100644
--- a/tools/perf/arch/arm64/util/unwind-libdw.c
+++ b/tools/perf/arch/arm64/util/unwind-libdw.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
+#include "perf_regs.h"
#include "../../../util/unwind-libdw.h"
#include "../../../util/perf_regs.h"
#include "../../../util/sample.h"
diff --git a/tools/perf/arch/csky/include/perf_regs.h b/tools/perf/arch/csky/include/perf_regs.h
index 1afcc0e916c2..076c7746c8a2 100644
--- a/tools/perf/arch/csky/include/perf_regs.h
+++ b/tools/perf/arch/csky/include/perf_regs.h
@@ -12,7 +12,4 @@
#define PERF_REGS_MAX PERF_REG_CSKY_MAX
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
-#define PERF_REG_IP PERF_REG_CSKY_PC
-#define PERF_REG_SP PERF_REG_CSKY_SP
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/csky/util/perf_regs.c b/tools/perf/arch/csky/util/perf_regs.c
index 2864e2e3776d..c0877c264d49 100644
--- a/tools/perf/arch/csky/util/perf_regs.c
+++ b/tools/perf/arch/csky/util/perf_regs.c
@@ -1,6 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
+#include "perf_regs.h"
#include "../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
};
+
+uint64_t arch__intr_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
+
+uint64_t arch__user_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
diff --git a/tools/perf/arch/csky/util/unwind-libdw.c b/tools/perf/arch/csky/util/unwind-libdw.c
index 4bb4a06776e4..79df4374ab18 100644
--- a/tools/perf/arch/csky/util/unwind-libdw.c
+++ b/tools/perf/arch/csky/util/unwind-libdw.c
@@ -2,6 +2,7 @@
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
#include <elfutils/libdwfl.h>
+#include "perf_regs.h"
#include "../../util/unwind-libdw.h"
#include "../../util/perf_regs.h"
#include "../../util/event.h"
diff --git a/tools/perf/arch/loongarch/include/perf_regs.h b/tools/perf/arch/loongarch/include/perf_regs.h
index 7833c7dbd38d..45c799fa5330 100644
--- a/tools/perf/arch/loongarch/include/perf_regs.h
+++ b/tools/perf/arch/loongarch/include/perf_regs.h
@@ -7,8 +7,6 @@
#include <asm/perf_regs.h>
#define PERF_REGS_MAX PERF_REG_LOONGARCH_MAX
-#define PERF_REG_IP PERF_REG_LOONGARCH_PC
-#define PERF_REG_SP PERF_REG_LOONGARCH_R3
#define PERF_REGS_MASK ((1ULL << PERF_REG_LOONGARCH_MAX) - 1)
diff --git a/tools/perf/arch/loongarch/util/perf_regs.c b/tools/perf/arch/loongarch/util/perf_regs.c
index 2833e101a7c6..2c56e8b56ddf 100644
--- a/tools/perf/arch/loongarch/util/perf_regs.c
+++ b/tools/perf/arch/loongarch/util/perf_regs.c
@@ -1,6 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
+#include "perf_regs.h"
#include "../../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
};
+
+uint64_t arch__intr_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
+
+uint64_t arch__user_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
diff --git a/tools/perf/arch/loongarch/util/unwind-libdw.c b/tools/perf/arch/loongarch/util/unwind-libdw.c
index a9415385230a..7b3b9a4b21f8 100644
--- a/tools/perf/arch/loongarch/util/unwind-libdw.c
+++ b/tools/perf/arch/loongarch/util/unwind-libdw.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2020-2023 Loongson Technology Corporation Limited */
#include <elfutils/libdwfl.h>
+#include "perf_regs.h"
#include "../../util/unwind-libdw.h"
#include "../../util/perf_regs.h"
#include "../../util/sample.h"
diff --git a/tools/perf/arch/mips/include/perf_regs.h b/tools/perf/arch/mips/include/perf_regs.h
index b8cd8bbb37ba..7082e91e0ed1 100644
--- a/tools/perf/arch/mips/include/perf_regs.h
+++ b/tools/perf/arch/mips/include/perf_regs.h
@@ -7,8 +7,6 @@
#include <asm/perf_regs.h>
#define PERF_REGS_MAX PERF_REG_MIPS_MAX
-#define PERF_REG_IP PERF_REG_MIPS_PC
-#define PERF_REG_SP PERF_REG_MIPS_R29
#define PERF_REGS_MASK ((1ULL << PERF_REG_MIPS_MAX) - 1)
diff --git a/tools/perf/arch/mips/util/perf_regs.c b/tools/perf/arch/mips/util/perf_regs.c
index 2864e2e3776d..c0877c264d49 100644
--- a/tools/perf/arch/mips/util/perf_regs.c
+++ b/tools/perf/arch/mips/util/perf_regs.c
@@ -1,6 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
+#include "perf_regs.h"
#include "../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
};
+
+uint64_t arch__intr_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
+
+uint64_t arch__user_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h
index 9bb17c3f370b..1c66f6ba6773 100644
--- a/tools/perf/arch/powerpc/include/perf_regs.h
+++ b/tools/perf/arch/powerpc/include/perf_regs.h
@@ -16,7 +16,4 @@ void perf_regs_load(u64 *regs);
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
#endif
-#define PERF_REG_IP PERF_REG_POWERPC_NIP
-#define PERF_REG_SP PERF_REG_POWERPC_R1
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c
index 4120fafe0be4..78b986e5268d 100644
--- a/tools/perf/arch/powerpc/util/mem-events.c
+++ b/tools/perf/arch/powerpc/util/mem-events.c
@@ -3,10 +3,10 @@
#include "mem-events.h"
/* PowerPC does not support 'ldlat' parameter. */
-char *perf_mem_events__name(int i, char *pmu_name __maybe_unused)
+const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
{
if (i == PERF_MEM_EVENTS__LOAD)
- return (char *) "cpu/mem-loads/";
+ return "cpu/mem-loads/";
- return (char *) "cpu/mem-stores/";
+ return "cpu/mem-stores/";
}
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index 8d07a78e742a..b38aa056eea0 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -4,6 +4,7 @@
#include <regex.h>
#include <linux/zalloc.h>
+#include "perf_regs.h"
#include "../../../util/perf_regs.h"
#include "../../../util/debug.h"
#include "../../../util/event.h"
@@ -226,3 +227,8 @@ uint64_t arch__intr_reg_mask(void)
}
return mask;
}
+
+uint64_t arch__user_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
diff --git a/tools/perf/arch/powerpc/util/unwind-libdw.c b/tools/perf/arch/powerpc/util/unwind-libdw.c
index e616642c754c..e9a5a8bb67d9 100644
--- a/tools/perf/arch/powerpc/util/unwind-libdw.c
+++ b/tools/perf/arch/powerpc/util/unwind-libdw.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
#include <linux/kernel.h>
+#include "perf_regs.h"
#include "../../../util/unwind-libdw.h"
#include "../../../util/perf_regs.h"
#include "../../../util/sample.h"
diff --git a/tools/perf/arch/riscv/include/perf_regs.h b/tools/perf/arch/riscv/include/perf_regs.h
index 6944bf0de53e..d482edb413e5 100644
--- a/tools/perf/arch/riscv/include/perf_regs.h
+++ b/tools/perf/arch/riscv/include/perf_regs.h
@@ -16,7 +16,4 @@
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
#endif
-#define PERF_REG_IP PERF_REG_RISCV_PC
-#define PERF_REG_SP PERF_REG_RISCV_SP
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/riscv/util/perf_regs.c b/tools/perf/arch/riscv/util/perf_regs.c
index 2864e2e3776d..c0877c264d49 100644
--- a/tools/perf/arch/riscv/util/perf_regs.c
+++ b/tools/perf/arch/riscv/util/perf_regs.c
@@ -1,6 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
+#include "perf_regs.h"
#include "../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
};
+
+uint64_t arch__intr_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
+
+uint64_t arch__user_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
diff --git a/tools/perf/arch/riscv/util/unwind-libdw.c b/tools/perf/arch/riscv/util/unwind-libdw.c
index 54a198714eb8..5c98010d8b59 100644
--- a/tools/perf/arch/riscv/util/unwind-libdw.c
+++ b/tools/perf/arch/riscv/util/unwind-libdw.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
#include <elfutils/libdwfl.h>
+#include "perf_regs.h"
#include "../../util/unwind-libdw.h"
#include "../../util/perf_regs.h"
#include "../../util/sample.h"
diff --git a/tools/perf/arch/s390/include/perf_regs.h b/tools/perf/arch/s390/include/perf_regs.h
index 52fcc0891da6..130dfad2b96a 100644
--- a/tools/perf/arch/s390/include/perf_regs.h
+++ b/tools/perf/arch/s390/include/perf_regs.h
@@ -11,7 +11,4 @@ void perf_regs_load(u64 *regs);
#define PERF_REGS_MAX PERF_REG_S390_MAX
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
-#define PERF_REG_IP PERF_REG_S390_PC
-#define PERF_REG_SP PERF_REG_S390_R15
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/s390/util/perf_regs.c b/tools/perf/arch/s390/util/perf_regs.c
index 2864e2e3776d..c0877c264d49 100644
--- a/tools/perf/arch/s390/util/perf_regs.c
+++ b/tools/perf/arch/s390/util/perf_regs.c
@@ -1,6 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
+#include "perf_regs.h"
#include "../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
};
+
+uint64_t arch__intr_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
+
+uint64_t arch__user_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
diff --git a/tools/perf/arch/s390/util/unwind-libdw.c b/tools/perf/arch/s390/util/unwind-libdw.c
index 7d92452d5287..f50fb6dbb35c 100644
--- a/tools/perf/arch/s390/util/unwind-libdw.c
+++ b/tools/perf/arch/s390/util/unwind-libdw.c
@@ -5,6 +5,7 @@
#include "../../util/event.h"
#include "../../util/sample.h"
#include "dwarf-regs-table.h"
+#include "perf_regs.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
diff --git a/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh b/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
index fa526a993845..59d7914ed6bb 100755
--- a/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
+++ b/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
@@ -24,7 +24,7 @@ sorted_table=$(mktemp /tmp/syscalltbl.XXXXXX)
grep '^[0-9]' "$in" | sort -n > $sorted_table
max_nr=0
-while read nr abi name entry compat; do
+while read nr _abi name entry _compat; do
	if [ $nr -ge 512 ] ; then # discard compat syscalls
break
fi
diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h
index 16e23b722042..f209ce2c1dd9 100644
--- a/tools/perf/arch/x86/include/perf_regs.h
+++ b/tools/perf/arch/x86/include/perf_regs.h
@@ -20,7 +20,5 @@ void perf_regs_load(u64 *regs);
#define PERF_REGS_MASK (((1ULL << PERF_REG_X86_64_MAX) - 1) & ~REG_NOSUPPORT)
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
#endif
-#define PERF_REG_IP PERF_REG_X86_IP
-#define PERF_REG_SP PERF_REG_X86_SP
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
index cbd582182932..b1ce0c52d88d 100644
--- a/tools/perf/arch/x86/util/evlist.c
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -75,11 +75,12 @@ int arch_evlist__add_default_attrs(struct evlist *evlist,
int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
- if (topdown_sys_has_perf_metrics() && evsel__sys_has_perf_metrics(lhs)) {
+ if (topdown_sys_has_perf_metrics() &&
+ (arch_evsel__must_be_in_group(lhs) || arch_evsel__must_be_in_group(rhs))) {
/* Ensure the topdown slots comes first. */
- if (strcasestr(lhs->name, "slots"))
+ if (strcasestr(lhs->name, "slots") && !strcasestr(lhs->name, "uops_retired.slots"))
return -1;
- if (strcasestr(rhs->name, "slots"))
+ if (strcasestr(rhs->name, "slots") && !strcasestr(rhs->name, "uops_retired.slots"))
return 1;
/* Followed by topdown events. */
if (strcasestr(lhs->name, "topdown") && !strcasestr(rhs->name, "topdown"))
diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c
index 81d22657922a..090d0f371891 100644
--- a/tools/perf/arch/x86/util/evsel.c
+++ b/tools/perf/arch/x86/util/evsel.c
@@ -40,12 +40,11 @@ bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
bool arch_evsel__must_be_in_group(const struct evsel *evsel)
{
- if (!evsel__sys_has_perf_metrics(evsel))
+ if (!evsel__sys_has_perf_metrics(evsel) || !evsel->name ||
+ strcasestr(evsel->name, "uops_retired.slots"))
return false;
- return evsel->name &&
- (strcasestr(evsel->name, "slots") ||
- strcasestr(evsel->name, "topdown"));
+ return strcasestr(evsel->name, "topdown") || strcasestr(evsel->name, "slots");
}
int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 74b70fd379df..31807791589e 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -60,8 +60,7 @@ struct intel_pt_recording {
size_t priv_size;
};
-static int intel_pt_parse_terms_with_default(const char *pmu_name,
- struct list_head *formats,
+static int intel_pt_parse_terms_with_default(struct perf_pmu *pmu,
const char *str,
u64 *config)
{
@@ -75,13 +74,12 @@ static int intel_pt_parse_terms_with_default(const char *pmu_name,
INIT_LIST_HEAD(terms);
- err = parse_events_terms(terms, str);
+ err = parse_events_terms(terms, str, /*input=*/ NULL);
if (err)
goto out_free;
attr.config = *config;
- err = perf_pmu__config_terms(pmu_name, formats, &attr, terms, true,
- NULL);
+ err = perf_pmu__config_terms(pmu, &attr, terms, /*zero=*/true, /*err=*/NULL);
if (err)
goto out_free;
@@ -91,12 +89,10 @@ out_free:
return err;
}
-static int intel_pt_parse_terms(const char *pmu_name, struct list_head *formats,
- const char *str, u64 *config)
+static int intel_pt_parse_terms(struct perf_pmu *pmu, const char *str, u64 *config)
{
*config = 0;
- return intel_pt_parse_terms_with_default(pmu_name, formats, str,
- config);
+ return intel_pt_parse_terms_with_default(pmu, str, config);
}
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
@@ -126,7 +122,7 @@ static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
*res = 0;
- mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
+ mask = perf_pmu__format_bits(intel_pt_pmu, str);
if (!mask)
return -EINVAL;
@@ -236,8 +232,7 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);
- intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, buf,
- &config);
+ intel_pt_parse_terms(intel_pt_pmu, buf, &config);
close(dirfd);
return config;
@@ -348,16 +343,11 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
if (priv_size != ptr->priv_size)
return -EINVAL;
- intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
- "tsc", &tsc_bit);
- intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
- "noretcomp", &noretcomp_bit);
- intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
- "mtc", &mtc_bit);
- mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
- "mtc_period");
- intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
- "cyc", &cyc_bit);
+ intel_pt_parse_terms(intel_pt_pmu, "tsc", &tsc_bit);
+ intel_pt_parse_terms(intel_pt_pmu, "noretcomp", &noretcomp_bit);
+ intel_pt_parse_terms(intel_pt_pmu, "mtc", &mtc_bit);
+ mtc_freq_bits = perf_pmu__format_bits(intel_pt_pmu, "mtc_period");
+ intel_pt_parse_terms(intel_pt_pmu, "cyc", &cyc_bit);
intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);
@@ -511,7 +501,7 @@ static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu, int dirfd,
valid |= 1;
- bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);
+ bits = perf_pmu__format_bits(intel_pt_pmu, name);
config &= bits;
@@ -781,8 +771,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
}
- intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
- "tsc", &tsc_bit);
+ intel_pt_parse_terms(intel_pt_pmu, "tsc", &tsc_bit);
if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
have_timing_info = true;
diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
index a8a782bcb121..191b372f9a2d 100644
--- a/tools/perf/arch/x86/util/mem-events.c
+++ b/tools/perf/arch/x86/util/mem-events.c
@@ -52,7 +52,7 @@ bool is_mem_loads_aux_event(struct evsel *leader)
return leader->core.attr.config == MEM_LOADS_AUX;
}
-char *perf_mem_events__name(int i, char *pmu_name)
+const char *perf_mem_events__name(int i, const char *pmu_name)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
@@ -65,7 +65,7 @@ char *perf_mem_events__name(int i, char *pmu_name)
if (!pmu_name) {
mem_loads_name__init = true;
- pmu_name = (char *)"cpu";
+ pmu_name = "cpu";
}
if (perf_pmus__have_event(pmu_name, "mem-loads-aux")) {
@@ -82,12 +82,12 @@ char *perf_mem_events__name(int i, char *pmu_name)
if (i == PERF_MEM_EVENTS__STORE) {
if (!pmu_name)
- pmu_name = (char *)"cpu";
+ pmu_name = "cpu";
scnprintf(mem_stores_name, sizeof(mem_stores_name),
e->name, pmu_name);
return mem_stores_name;
}
- return (char *)e->name;
+ return e->name;
}
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
index 8ad4112ad10c..b813502a2727 100644
--- a/tools/perf/arch/x86/util/perf_regs.c
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/zalloc.h>
+#include "perf_regs.h"
#include "../../../perf-sys.h"
#include "../../../util/perf_regs.h"
#include "../../../util/debug.h"
@@ -317,3 +318,8 @@ uint64_t arch__intr_reg_mask(void)
return PERF_REGS_MASK;
}
+
+uint64_t arch__user_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
index 65d8cdff4d5f..f428cffb0378 100644
--- a/tools/perf/arch/x86/util/pmu.c
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -126,7 +126,7 @@ close_dir:
return ret;
}
-static char *__pmu_find_real_name(const char *name)
+static const char *__pmu_find_real_name(const char *name)
{
struct pmu_alias *pmu_alias;
@@ -135,10 +135,10 @@ static char *__pmu_find_real_name(const char *name)
return pmu_alias->name;
}
- return (char *)name;
+ return name;
}
-char *pmu_find_real_name(const char *name)
+const char *pmu_find_real_name(const char *name)
{
if (cached_list)
return __pmu_find_real_name(name);
@@ -149,7 +149,7 @@ char *pmu_find_real_name(const char *name)
return __pmu_find_real_name(name);
}
-static char *__pmu_find_alias_name(const char *name)
+static const char *__pmu_find_alias_name(const char *name)
{
struct pmu_alias *pmu_alias;
@@ -160,7 +160,7 @@ static char *__pmu_find_alias_name(const char *name)
return NULL;
}
-char *pmu_find_alias_name(const char *name)
+const char *pmu_find_alias_name(const char *name)
{
if (cached_list)
return __pmu_find_alias_name(name);
diff --git a/tools/perf/arch/x86/util/unwind-libdw.c b/tools/perf/arch/x86/util/unwind-libdw.c
index ef71e8bf80bf..edb77e20e083 100644
--- a/tools/perf/arch/x86/util/unwind-libdw.c
+++ b/tools/perf/arch/x86/util/unwind-libdw.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
+#include "perf_regs.h"
#include "../../../util/unwind-libdw.h"
#include "../../../util/perf_regs.h"
#include "util/sample.h"
diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
index 07bbc449329e..c2ab30907ae7 100644
--- a/tools/perf/bench/Build
+++ b/tools/perf/bench/Build
@@ -17,6 +17,7 @@ perf-y += inject-buildid.o
perf-y += evlist-open-close.o
perf-y += breakpoint.o
perf-y += pmu-scan.o
+perf-y += uprobe.o
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index a0625c77bea3..faa18e6d2467 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -43,6 +43,9 @@ int bench_inject_build_id(int argc, const char **argv);
int bench_evlist_open_close(int argc, const char **argv);
int bench_breakpoint_thread(int argc, const char **argv);
int bench_breakpoint_enable(int argc, const char **argv);
+int bench_uprobe_baseline(int argc, const char **argv);
+int bench_uprobe_empty(int argc, const char **argv);
+int bench_uprobe_trace_printk(int argc, const char **argv);
int bench_pmu_scan(int argc, const char **argv);
#define BENCH_FORMAT_DEFAULT_STR "default"
diff --git a/tools/perf/bench/breakpoint.c b/tools/perf/bench/breakpoint.c
index 41385f89ffc7..dfd18f5db97d 100644
--- a/tools/perf/bench/breakpoint.c
+++ b/tools/perf/bench/breakpoint.c
@@ -47,6 +47,7 @@ struct breakpoint {
static int breakpoint_setup(void *addr)
{
struct perf_event_attr attr = { .size = 0, };
+ int fd;
attr.type = PERF_TYPE_BREAKPOINT;
attr.size = sizeof(attr);
@@ -56,7 +57,12 @@ static int breakpoint_setup(void *addr)
attr.bp_addr = (unsigned long)addr;
attr.bp_type = HW_BREAKPOINT_RW;
attr.bp_len = HW_BREAKPOINT_LEN_1;
- return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
+ fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
+
+ if (fd < 0)
+ fd = -errno;
+
+ return fd;
}
static void *passive_thread(void *arg)
@@ -122,8 +128,14 @@ int bench_breakpoint_thread(int argc, const char **argv)
for (i = 0; i < thread_params.nbreakpoints; i++) {
breakpoints[i].fd = breakpoint_setup(&breakpoints[i].watched);
- if (breakpoints[i].fd == -1)
+
+ if (breakpoints[i].fd < 0) {
+ if (breakpoints[i].fd == -ENODEV) {
+ printf("Skipping perf bench breakpoint thread: No hardware support\n");
+ return 0;
+ }
exit((perror("perf_event_open"), EXIT_FAILURE));
+ }
}
gettimeofday(&start, NULL);
for (i = 0; i < thread_params.nparallel; i++) {
@@ -196,8 +208,14 @@ int bench_breakpoint_enable(int argc, const char **argv)
exit(EXIT_FAILURE);
}
fd = breakpoint_setup(&watched);
- if (fd == -1)
+
+ if (fd < 0) {
+ if (fd == -ENODEV) {
+ printf("Skipping perf bench breakpoint enable: No hardware support\n");
+ return 0;
+ }
exit((perror("perf_event_open"), EXIT_FAILURE));
+ }
nthreads = enable_params.npassive + enable_params.nactive;
threads = calloc(nthreads, sizeof(threads[0]));
if (!threads)
diff --git a/tools/perf/bench/pmu-scan.c b/tools/perf/bench/pmu-scan.c
index c7d207f8e13c..9e4d36486f62 100644
--- a/tools/perf/bench/pmu-scan.c
+++ b/tools/perf/bench/pmu-scan.c
@@ -57,9 +57,7 @@ static int save_result(void)
r->is_core = pmu->is_core;
r->nr_caps = pmu->nr_caps;
- r->nr_aliases = 0;
- list_for_each(list, &pmu->aliases)
- r->nr_aliases++;
+ r->nr_aliases = perf_pmu__num_events(pmu);
r->nr_formats = 0;
list_for_each(list, &pmu->format)
@@ -98,9 +96,7 @@ static int check_result(bool core_only)
return -1;
}
- nr = 0;
- list_for_each(list, &pmu->aliases)
- nr++;
+ nr = perf_pmu__num_events(pmu);
if (nr != r->nr_aliases) {
pr_err("Unmatched number of event aliases in %s: expect %d vs got %d\n",
pmu->name, r->nr_aliases, nr);
diff --git a/tools/perf/bench/uprobe.c b/tools/perf/bench/uprobe.c
new file mode 100644
index 000000000000..914c0817fe8a
--- /dev/null
+++ b/tools/perf/bench/uprobe.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * uprobe.c
+ *
+ * uprobe benchmarks
+ *
+ * Copyright (C) 2023, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+#include "../perf.h"
+#include "../util/util.h"
+#include <subcmd/parse-options.h>
+#include "../builtin.h"
+#include "bench.h"
+#include <linux/compiler.h>
+#include <linux/time64.h>
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#define LOOPS_DEFAULT 1000
+static int loops = LOOPS_DEFAULT;
+
+enum bench_uprobe {
+ BENCH_UPROBE__BASELINE,
+ BENCH_UPROBE__EMPTY,
+ BENCH_UPROBE__TRACE_PRINTK,
+};
+
+static const struct option options[] = {
+ OPT_INTEGER('l', "loop", &loops, "Specify number of loops"),
+ OPT_END()
+};
+
+static const char * const bench_uprobe_usage[] = {
+ "perf bench uprobe <options>",
+ NULL
+};
+
+#ifdef HAVE_BPF_SKEL
+#include "bpf_skel/bench_uprobe.skel.h"
+
+#define bench_uprobe__attach_uprobe(prog) \
+ skel->links.prog = bpf_program__attach_uprobe_opts(/*prog=*/skel->progs.prog, \
+ /*pid=*/-1, \
+ /*binary_path=*/"/lib64/libc.so.6", \
+ /*func_offset=*/0, \
+ /*opts=*/&uprobe_opts); \
+ if (!skel->links.prog) { \
+ err = -errno; \
+ fprintf(stderr, "Failed to attach bench uprobe \"%s\": %s\n", #prog, strerror(errno)); \
+ goto cleanup; \
+ }
+
+struct bench_uprobe_bpf *skel;
+
+static int bench_uprobe__setup_bpf_skel(enum bench_uprobe bench)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ int err;
+
+ /* Load and verify BPF application */
+ skel = bench_uprobe_bpf__open();
+ if (!skel) {
+ fprintf(stderr, "Failed to open and load uprobes bench BPF skeleton\n");
+ return -1;
+ }
+
+ err = bench_uprobe_bpf__load(skel);
+ if (err) {
+ fprintf(stderr, "Failed to load and verify BPF skeleton\n");
+ goto cleanup;
+ }
+
+ uprobe_opts.func_name = "usleep";
+ switch (bench) {
+ case BENCH_UPROBE__BASELINE: break;
+ case BENCH_UPROBE__EMPTY: bench_uprobe__attach_uprobe(empty); break;
+ case BENCH_UPROBE__TRACE_PRINTK: bench_uprobe__attach_uprobe(trace_printk); break;
+ default:
+ fprintf(stderr, "Invalid bench: %d\n", bench);
+ goto cleanup;
+ }
+
+ return err;
+cleanup:
+ bench_uprobe_bpf__destroy(skel);
+ return err;
+}
+
+static void bench_uprobe__teardown_bpf_skel(void)
+{
+ if (skel) {
+ bench_uprobe_bpf__destroy(skel);
+ skel = NULL;
+ }
+}
+#else
+static int bench_uprobe__setup_bpf_skel(enum bench_uprobe bench __maybe_unused) { return 0; }
+static void bench_uprobe__teardown_bpf_skel(void) {};
+#endif
+
+static int bench_uprobe_format__default_fprintf(const char *name, const char *unit, u64 diff, FILE *fp)
+{
+ static u64 baseline, previous;
+ s64 diff_to_baseline = diff - baseline,
+ diff_to_previous = diff - previous;
+ int printed = fprintf(fp, "# Executed %'d %s calls\n", loops, name);
+
+ printed += fprintf(fp, " %14s: %'" PRIu64 " %ss", "Total time", diff, unit);
+
+ if (baseline) {
+ printed += fprintf(fp, " %s%'" PRId64 " to baseline", diff_to_baseline > 0 ? "+" : "", diff_to_baseline);
+
+ if (previous != baseline)
+ fprintf(stdout, " %s%'" PRId64 " to previous", diff_to_previous > 0 ? "+" : "", diff_to_previous);
+ }
+
+ printed += fprintf(fp, "\n\n %'.3f %ss/op", (double)diff / (double)loops, unit);
+
+ if (baseline) {
+ printed += fprintf(fp, " %'.3f %ss/op to baseline", (double)diff_to_baseline / (double)loops, unit);
+
+ if (previous != baseline)
+ printed += fprintf(fp, " %'.3f %ss/op to previous", (double)diff_to_previous / (double)loops, unit);
+ } else {
+ baseline = diff;
+ }
+
+ fputc('\n', fp);
+
+ previous = diff;
+
+ return printed + 1;
+}
+
+static int bench_uprobe(int argc, const char **argv, enum bench_uprobe bench)
+{
+ const char *name = "usleep(1000)", *unit = "usec";
+ struct timespec start, end;
+ u64 diff;
+ int i;
+
+ argc = parse_options(argc, argv, options, bench_uprobe_usage, 0);
+
+ if (bench != BENCH_UPROBE__BASELINE && bench_uprobe__setup_bpf_skel(bench) < 0)
+ return 0;
+
+ clock_gettime(CLOCK_REALTIME, &start);
+
+ for (i = 0; i < loops; i++) {
+ usleep(USEC_PER_MSEC);
+ }
+
+ clock_gettime(CLOCK_REALTIME, &end);
+
+ diff = end.tv_sec * NSEC_PER_SEC + end.tv_nsec - (start.tv_sec * NSEC_PER_SEC + start.tv_nsec);
+ diff /= NSEC_PER_USEC;
+
+ switch (bench_format) {
+ case BENCH_FORMAT_DEFAULT:
+ bench_uprobe_format__default_fprintf(name, unit, diff, stdout);
+ break;
+
+ case BENCH_FORMAT_SIMPLE:
+ printf("%" PRIu64 "\n", diff);
+ break;
+
+ default:
+ /* reaching here is something of a disaster */
+ fprintf(stderr, "Unknown format:%d\n", bench_format);
+ exit(1);
+ }
+
+ if (bench != BENCH_UPROBE__BASELINE)
+ bench_uprobe__teardown_bpf_skel();
+
+ return 0;
+}
+
+int bench_uprobe_baseline(int argc, const char **argv)
+{
+ return bench_uprobe(argc, argv, BENCH_UPROBE__BASELINE);
+}
+
+int bench_uprobe_empty(int argc, const char **argv)
+{
+ return bench_uprobe(argc, argv, BENCH_UPROBE__EMPTY);
+}
+
+int bench_uprobe_trace_printk(int argc, const char **argv)
+{
+ return bench_uprobe(argc, argv, BENCH_UPROBE__TRACE_PRINTK);
+}
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 5033e8bab276..1a8898d5b560 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -105,6 +105,13 @@ static struct bench breakpoint_benchmarks[] = {
{ NULL, NULL, NULL },
};
+static struct bench uprobe_benchmarks[] = {
+ { "baseline", "Baseline libc usleep(1000) call", bench_uprobe_baseline, },
+ { "empty", "Attach empty BPF prog to uprobe on usleep, system wide", bench_uprobe_empty, },
+ { "trace_printk", "Attach trace_printk BPF prog to uprobe on usleep syswide", bench_uprobe_trace_printk, },
+ { NULL, NULL, NULL },
+};
+
struct collection {
const char *name;
const char *summary;
@@ -124,6 +131,7 @@ static struct collection collections[] = {
#endif
{ "internals", "Perf-internals benchmarks", internals_benchmarks },
{ "breakpoint", "Breakpoint benchmarks", breakpoint_benchmarks },
+ { "uprobe", "uprobe benchmarks", uprobe_benchmarks },
{ "all", "All benchmarks", NULL },
{ NULL, NULL, NULL }
};
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index e8a1b16aa5f8..57d300d8e570 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -1915,8 +1915,8 @@ static int data_init(int argc, const char **argv)
struct perf_data *data = &d->data;
data->path = use_default ? defaults[i] : argv[i];
- data->mode = PERF_DATA_MODE_READ,
- data->force = force,
+ data->mode = PERF_DATA_MODE_READ;
+ data->force = force;
d->idx = i;
}
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 7fec2cca759f..a343823c8ddf 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -145,9 +145,20 @@ static void default_print_event(void *ps, const char *pmu_name, const char *topi
putchar('\n');
if (desc && print_state->desc) {
+ char *desc_with_unit = NULL;
+ int desc_len = -1;
+
+ if (pmu_name && strcmp(pmu_name, "default_core")) {
+ desc_len = strlen(desc);
+ desc_len = asprintf(&desc_with_unit,
+ desc[desc_len - 1] != '.'
+ ? "%s. Unit: %s" : "%s Unit: %s",
+ desc, pmu_name);
+ }
printf("%*s", 8, "[");
- wordwrap(desc, 8, pager_get_columns(), 0);
+ wordwrap(desc_len > 0 ? desc_with_unit : desc, 8, pager_get_columns(), 0);
printf("]\n");
+ free(desc_with_unit);
}
long_desc = long_desc ?: desc;
if (long_desc && print_state->long_desc) {
@@ -423,6 +434,13 @@ static void json_print_metric(void *ps __maybe_unused, const char *group,
strbuf_release(&buf);
}
+static bool default_skip_duplicate_pmus(void *ps)
+{
+ struct print_state *print_state = ps;
+
+ return !print_state->long_desc;
+}
+
int cmd_list(int argc, const char **argv)
{
int i, ret = 0;
@@ -434,6 +452,7 @@ int cmd_list(int argc, const char **argv)
.print_end = default_print_end,
.print_event = default_print_event,
.print_metric = default_print_metric,
+ .skip_duplicate_pmus = default_skip_duplicate_pmus,
};
const char *cputype = NULL;
const char *unit_name = NULL;
@@ -502,7 +521,7 @@ int cmd_list(int argc, const char **argv)
ret = -1;
goto out;
}
- default_ps.pmu_glob = pmu->name;
+ default_ps.pmu_glob = strdup(pmu->name);
}
}
print_cb.print_start(ps);
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index c15386cb1033..b141f2134274 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -2052,6 +2052,7 @@ static int __cmd_contention(int argc, const char **argv)
if (IS_ERR(session)) {
pr_err("Initializing perf session failed\n");
err = PTR_ERR(session);
+ session = NULL;
goto out_delete;
}
@@ -2506,7 +2507,7 @@ int cmd_lock(int argc, const char **argv)
OPT_CALLBACK('M', "map-nr-entries", &bpf_map_entries, "num",
"Max number of BPF map entries", parse_map_entry),
OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
- "Set the maximum stack depth when collecting lopck contention, "
+ "Set the maximum stack depth when collecting lock contention, "
"Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
OPT_INTEGER(0, "stack-skip", &stack_skip,
"Set the number of stack depth to skip when finding a lock caller, "
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index aec18db7ff23..34bb31f08bb5 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -37,8 +37,6 @@
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/perf_api_probe.h"
-#include "util/llvm-utils.h"
-#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
@@ -2465,16 +2463,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
}
- err = bpf__apply_obj_config();
- if (err) {
- char errbuf[BUFSIZ];
-
- bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
- pr_err("ERROR: Apply config to BPF failed: %s\n",
- errbuf);
- goto out_free_threads;
- }
-
/*
* Normally perf_session__new would do this, but it doesn't have the
* evlist.
@@ -3486,10 +3474,6 @@ static struct option __record_options[] = {
"collect kernel callchains"),
OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
"collect user callchains"),
- OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
- "clang binary to use for compiling BPF scriptlets"),
- OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
- "options passed to clang when compiling BPF scriptlets"),
OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
@@ -3967,27 +3951,6 @@ int cmd_record(int argc, const char **argv)
setlocale(LC_ALL, "");
-#ifndef HAVE_LIBBPF_SUPPORT
-# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
- set_nobuild('\0', "clang-path", true);
- set_nobuild('\0', "clang-opt", true);
-# undef set_nobuild
-#endif
-
-#ifndef HAVE_BPF_PROLOGUE
-# if !defined (HAVE_DWARF_SUPPORT)
-# define REASON "NO_DWARF=1"
-# elif !defined (HAVE_LIBBPF_SUPPORT)
-# define REASON "NO_LIBBPF=1"
-# else
-# define REASON "this architecture doesn't support BPF prologue"
-# endif
-# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
- set_nobuild('\0', "vmlinux", true);
-# undef set_nobuild
-# undef REASON
-#endif
-
#ifndef HAVE_BPF_SKEL
# define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c)
set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
@@ -4116,14 +4079,6 @@ int cmd_record(int argc, const char **argv)
if (dry_run)
goto out;
- err = bpf__setup_stdout(rec->evlist);
- if (err) {
- bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
- pr_err("ERROR: Setup BPF stdout failed: %s\n",
- errbuf);
- goto out;
- }
-
err = -ENOMEM;
if (rec->no_buildid_cache || rec->no_buildid) {
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 200b3e7ea8da..517bf25750c8 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -2199,6 +2199,17 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(RETIRE_LAT))
fprintf(fp, "%16" PRIu16, sample->retire_lat);
+ if (PRINT_FIELD(CGROUP)) {
+ const char *cgrp_name;
+ struct cgroup *cgrp = cgroup__find(machine->env,
+ sample->cgroup);
+ if (cgrp != NULL)
+ cgrp_name = cgrp->name;
+ else
+ cgrp_name = "unknown";
+ fprintf(fp, " %s", cgrp_name);
+ }
+
if (PRINT_FIELD(IP)) {
struct callchain_cursor *cursor = NULL;
@@ -2243,17 +2254,6 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(CODE_PAGE_SIZE))
fprintf(fp, " %s", get_page_size_name(sample->code_page_size, str));
- if (PRINT_FIELD(CGROUP)) {
- const char *cgrp_name;
- struct cgroup *cgrp = cgroup__find(machine->env,
- sample->cgroup);
- if (cgrp != NULL)
- cgrp_name = cgrp->name;
- else
- cgrp_name = "unknown";
- fprintf(fp, " %s", cgrp_name);
- }
-
perf_sample__fprintf_ipc(sample, attr, fp);
fprintf(fp, "\n");
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1baa2acb3ced..ea8c7eca5eee 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1805,6 +1805,7 @@ int cmd_top(int argc, const char **argv)
top.session = perf_session__new(NULL, NULL);
if (IS_ERR(top.session)) {
status = PTR_ERR(top.session);
+ top.session = NULL;
goto out_delete_evlist;
}
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 6e73d0e95715..e541d0e2777a 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -18,6 +18,10 @@
#include <api/fs/tracing_path.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#ifdef HAVE_BPF_SKEL
+#include "bpf_skel/augmented_raw_syscalls.skel.h"
+#endif
#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
@@ -53,7 +57,6 @@
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
-#include "util/bpf-loader.h"
#include "util/tracepoint.h"
#include "callchain.h"
#include "print_binary.h"
@@ -127,25 +130,19 @@ struct trace {
struct syscalltbl *sctbl;
struct {
struct syscall *table;
- struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
- struct bpf_map *sys_enter,
- *sys_exit;
- } prog_array;
struct {
struct evsel *sys_enter,
- *sys_exit,
- *augmented;
+ *sys_exit,
+ *bpf_output;
} events;
- struct bpf_program *unaugmented_prog;
} syscalls;
- struct {
- struct bpf_map *map;
- } dump;
+#ifdef HAVE_BPF_SKEL
+ struct augmented_raw_syscalls_bpf *skel;
+#endif
struct record_opts opts;
struct evlist *evlist;
struct machine *host;
struct thread *current;
- struct bpf_object *bpf_obj;
struct cgroup *cgroup;
u64 base_time;
FILE *output;
@@ -415,6 +412,7 @@ static int evsel__init_syscall_tp(struct evsel *evsel)
if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
return -ENOENT;
+
return 0;
}
@@ -1296,6 +1294,22 @@ static struct thread_trace *thread_trace__new(void)
return ttrace;
}
+static void thread_trace__free_files(struct thread_trace *ttrace);
+
+static void thread_trace__delete(void *pttrace)
+{
+ struct thread_trace *ttrace = pttrace;
+
+ if (!ttrace)
+ return;
+
+ intlist__delete(ttrace->syscall_stats);
+ ttrace->syscall_stats = NULL;
+ thread_trace__free_files(ttrace);
+ zfree(&ttrace->entry_str);
+ free(ttrace);
+}
+
static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
struct thread_trace *ttrace;
@@ -1333,6 +1347,17 @@ void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
static const size_t trace__entry_str_size = 2048;
+static void thread_trace__free_files(struct thread_trace *ttrace)
+{
+ for (int i = 0; i < ttrace->files.max; ++i) {
+ struct file *file = ttrace->files.table + i;
+ zfree(&file->pathname);
+ }
+
+ zfree(&ttrace->files.table);
+ ttrace->files.max = -1;
+}
+
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
if (fd < 0)
@@ -1635,6 +1660,8 @@ static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
if (trace->host == NULL)
return -ENOMEM;
+ thread__set_priv_destructor(thread_trace__delete);
+
err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
if (err < 0)
goto out;
@@ -2816,7 +2843,7 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
if (thread)
trace__fprintf_comm_tid(trace, thread, trace->output);
- if (evsel == trace->syscalls.events.augmented) {
+ if (evsel == trace->syscalls.events.bpf_output) {
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
struct syscall *sc = trace__syscall_info(trace, evsel, id);
@@ -3136,13 +3163,8 @@ static void evlist__free_syscall_tp_fields(struct evlist *evlist)
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- struct evsel_trace *et = evsel->priv;
-
- if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
- continue;
-
- zfree(&et->fmt);
- free(et);
+ evsel_trace__delete(evsel->priv);
+ evsel->priv = NULL;
}
}
@@ -3254,35 +3276,16 @@ out_enomem:
goto out;
}
-#ifdef HAVE_LIBBPF_SUPPORT
-static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
-{
- if (trace->bpf_obj == NULL)
- return NULL;
-
- return bpf_object__find_map_by_name(trace->bpf_obj, name);
-}
-
-static void trace__set_bpf_map_filtered_pids(struct trace *trace)
-{
- trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
-}
-
-static void trace__set_bpf_map_syscalls(struct trace *trace)
-{
- trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
- trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
-}
-
+#ifdef HAVE_BPF_SKEL
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
{
struct bpf_program *pos, *prog = NULL;
const char *sec_name;
- if (trace->bpf_obj == NULL)
+ if (trace->skel->obj == NULL)
return NULL;
- bpf_object__for_each_program(pos, trace->bpf_obj) {
+ bpf_object__for_each_program(pos, trace->skel->obj) {
sec_name = bpf_program__section_name(pos);
if (sec_name && !strcmp(sec_name, name)) {
prog = pos;
@@ -3300,12 +3303,12 @@ static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, str
if (prog_name == NULL) {
char default_prog_name[256];
- scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
+ scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
prog = trace__find_bpf_program_by_title(trace, default_prog_name);
if (prog != NULL)
goto out_found;
if (sc->fmt && sc->fmt->alias) {
- scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
+ scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
prog = trace__find_bpf_program_by_title(trace, default_prog_name);
if (prog != NULL)
goto out_found;
@@ -3323,7 +3326,7 @@ out_found:
pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
prog_name, type, sc->name);
out_unaugmented:
- return trace->syscalls.unaugmented_prog;
+ return trace->skel->progs.syscall_unaugmented;
}
static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
@@ -3340,13 +3343,13 @@ static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
{
struct syscall *sc = trace__syscall_info(trace, NULL, id);
- return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
+ return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}
static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
{
struct syscall *sc = trace__syscall_info(trace, NULL, id);
- return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
+ return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
@@ -3371,7 +3374,7 @@ try_to_find_pair:
bool is_candidate = false;
if (pair == NULL || pair == sc ||
- pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
+ pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
continue;
for (field = sc->args, candidate_field = pair->args;
@@ -3395,6 +3398,19 @@ try_to_find_pair:
if (strcmp(field->type, candidate_field->type))
goto next_candidate;
+ /*
+ * This is limited in the BPF program but sys_write
+ * uses "const char *" for its "buf" arg so we need to
+ * use some heuristic that is kinda future proof...
+ */
+ if (strcmp(field->type, "const char *") == 0 &&
+ !(strstr(field->name, "name") ||
+ strstr(field->name, "path") ||
+ strstr(field->name, "file") ||
+ strstr(field->name, "root") ||
+ strstr(field->name, "description")))
+ goto next_candidate;
+
is_candidate = true;
}
@@ -3424,7 +3440,7 @@ try_to_find_pair:
*/
if (pair_prog == NULL) {
pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
- if (pair_prog == trace->syscalls.unaugmented_prog)
+ if (pair_prog == trace->skel->progs.syscall_unaugmented)
goto next_candidate;
}
@@ -3439,8 +3455,8 @@ try_to_find_pair:
static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
- int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
- map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
+ int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
+ int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
int err = 0, key;
for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
@@ -3502,7 +3518,7 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
* For now we're just reusing the sys_enter prog, and if it
* already has an augmenter, we don't need to find one.
*/
- if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
+ if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
continue;
/*
@@ -3525,74 +3541,9 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
break;
}
-
return err;
}
-
-static void trace__delete_augmented_syscalls(struct trace *trace)
-{
- struct evsel *evsel, *tmp;
-
- evlist__remove(trace->evlist, trace->syscalls.events.augmented);
- evsel__delete(trace->syscalls.events.augmented);
- trace->syscalls.events.augmented = NULL;
-
- evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
- if (evsel->bpf_obj == trace->bpf_obj) {
- evlist__remove(trace->evlist, evsel);
- evsel__delete(evsel);
- }
-
- }
-
- bpf_object__close(trace->bpf_obj);
- trace->bpf_obj = NULL;
-}
-#else // HAVE_LIBBPF_SUPPORT
-static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
- const char *name __maybe_unused)
-{
- return NULL;
-}
-
-static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
-{
-}
-
-static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
-{
-}
-
-static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
- const char *name __maybe_unused)
-{
- return NULL;
-}
-
-static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
-{
- return 0;
-}
-
-static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
-{
-}
-#endif // HAVE_LIBBPF_SUPPORT
-
-static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
-{
- struct evsel *evsel;
-
- evlist__for_each_entry(trace->evlist, evsel) {
- if (evsel == trace->syscalls.events.augmented ||
- evsel->bpf_obj == trace->bpf_obj)
- continue;
-
- return false;
- }
-
- return true;
-}
+#endif // HAVE_BPF_SKEL
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
@@ -3956,23 +3907,31 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
err = evlist__open(evlist);
if (err < 0)
goto out_error_open;
+#ifdef HAVE_BPF_SKEL
+ if (trace->syscalls.events.bpf_output) {
+ struct perf_cpu cpu;
- err = bpf__apply_obj_config();
- if (err) {
- char errbuf[BUFSIZ];
-
- bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
- pr_err("ERROR: Apply config to BPF failed: %s\n",
- errbuf);
- goto out_error_open;
+ /*
+ * Set up the __augmented_syscalls__ BPF map to hold for each
+ * CPU the bpf-output event's file descriptor.
+ */
+ perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
+ bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
+ &cpu.cpu, sizeof(int),
+ xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
+ cpu.cpu, 0),
+ sizeof(__u32), BPF_ANY);
+ }
}
-
+#endif
err = trace__set_filter_pids(trace);
if (err < 0)
goto out_error_mem;
- if (trace->syscalls.prog_array.sys_enter)
+#ifdef HAVE_BPF_SKEL
+ if (trace->skel && trace->skel->progs.sys_enter)
trace__init_syscalls_bpf_prog_array_maps(trace);
+#endif
if (trace->ev_qualifier_ids.nr > 0) {
err = trace__set_ev_qualifier_filter(trace);
@@ -4005,9 +3964,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_apply_filters;
- if (trace->dump.map)
- bpf_map__fprintf(trace->dump.map, trace->output);
-
err = evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;
@@ -4704,6 +4660,18 @@ static void trace__exit(struct trace *trace)
zfree(&trace->perfconfig_events);
}
+#ifdef HAVE_BPF_SKEL
+static int bpf__setup_bpf_output(struct evlist *evlist)
+{
+ int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
+
+ if (err)
+ pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
+
+ return err;
+}
+#endif
+
int cmd_trace(int argc, const char **argv)
{
const char *trace_usage[] = {
@@ -4735,7 +4703,6 @@ int cmd_trace(int argc, const char **argv)
.max_stack = UINT_MAX,
.max_events = ULONG_MAX,
};
- const char *map_dump_str = NULL;
const char *output_name = NULL;
const struct option trace_options[] = {
OPT_CALLBACK('e', "event", &trace, "event",
@@ -4769,9 +4736,6 @@ int cmd_trace(int argc, const char **argv)
OPT_CALLBACK(0, "duration", &trace, "float",
"show only events with duration > N.M ms",
trace__set_duration),
-#ifdef HAVE_LIBBPF_SUPPORT
- OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
-#endif
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_BOOLEAN('T', "time", &trace.full_time,
@@ -4898,87 +4862,48 @@ int cmd_trace(int argc, const char **argv)
"cgroup monitoring only available in system-wide mode");
}
- evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
- if (IS_ERR(evsel)) {
- bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
- pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
- goto out;
- }
-
- if (evsel) {
- trace.syscalls.events.augmented = evsel;
+#ifdef HAVE_BPF_SKEL
+ if (!trace.trace_syscalls)
+ goto skip_augmentation;
- evsel = evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
- if (evsel == NULL) {
- pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
- goto out;
- }
+ trace.skel = augmented_raw_syscalls_bpf__open();
+ if (!trace.skel) {
+ pr_debug("Failed to open augmented syscalls BPF skeleton");
+ } else {
+ /*
+ * Disable attaching the BPF programs except for sys_enter and
+ * sys_exit that tail call into this as necessary.
+ */
+ struct bpf_program *prog;
- if (evsel->bpf_obj == NULL) {
- pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
- goto out;
+ bpf_object__for_each_program(prog, trace.skel->obj) {
+ if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
+ bpf_program__set_autoattach(prog, /*autoattach=*/false);
}
- trace.bpf_obj = evsel->bpf_obj;
+ err = augmented_raw_syscalls_bpf__load(trace.skel);
- /*
- * If we have _just_ the augmenter event but don't have a
- * explicit --syscalls, then assume we want all strace-like
- * syscalls:
- */
- if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
- trace.trace_syscalls = true;
- /*
- * So, if we have a syscall augmenter, but trace_syscalls, aka
- * strace-like syscall tracing is not set, then we need to trow
- * away the augmenter, i.e. all the events that were created
- * from that BPF object file.
- *
- * This is more to fix the current .perfconfig trace.add_events
- * style of setting up the strace-like eBPF based syscall point
- * payload augmenter.
- *
- * All this complexity will be avoided by adding an alternative
- * to trace.add_events in the form of
- * trace.bpf_augmented_syscalls, that will be only parsed if we
- * need it.
- *
- * .perfconfig trace.add_events is still useful if we want, for
- * instance, have msr_write.msr in some .perfconfig profile based
- * 'perf trace --config determinism.profile' mode, where for some
- * particular goal/workload type we want a set of events and
- * output mode (with timings, etc) instead of having to add
- * all via the command line.
- *
- * Also --config to specify an alternate .perfconfig file needs
- * to be implemented.
- */
- if (!trace.trace_syscalls) {
- trace__delete_augmented_syscalls(&trace);
+ if (err < 0) {
+ libbpf_strerror(err, bf, sizeof(bf));
+ pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
} else {
- trace__set_bpf_map_filtered_pids(&trace);
- trace__set_bpf_map_syscalls(&trace);
- trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
+ augmented_raw_syscalls_bpf__attach(trace.skel);
+ trace__add_syscall_newtp(&trace);
}
}
- err = bpf__setup_stdout(trace.evlist);
+ err = bpf__setup_bpf_output(trace.evlist);
if (err) {
- bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
- pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
+ libbpf_strerror(err, bf, sizeof(bf));
+ pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
goto out;
}
-
+ trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
+ assert(!strcmp(evsel__name(trace.syscalls.events.bpf_output), "__augmented_syscalls__"));
+skip_augmentation:
+#endif
err = -1;
- if (map_dump_str) {
- trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
- if (trace.dump.map == NULL) {
- pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
- goto out;
- }
- }
-
if (trace.trace_pgfaults) {
trace.opts.sample_address = true;
trace.opts.sample_time = true;
@@ -5029,7 +4954,7 @@ int cmd_trace(int argc, const char **argv)
* buffers that are being copied from kernel to userspace, think 'read'
* syscall.
*/
- if (trace.syscalls.events.augmented) {
+ if (trace.syscalls.events.bpf_output) {
evlist__for_each_entry(trace.evlist, evsel) {
bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
@@ -5038,9 +4963,9 @@ int cmd_trace(int argc, const char **argv)
goto init_augmented_syscall_tp;
}
- if (trace.syscalls.events.augmented->priv == NULL &&
+ if (trace.syscalls.events.bpf_output->priv == NULL &&
strstr(evsel__name(evsel), "syscalls:sys_enter")) {
- struct evsel *augmented = trace.syscalls.events.augmented;
+ struct evsel *augmented = trace.syscalls.events.bpf_output;
if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
evsel__init_augmented_syscall_tp_args(augmented))
goto out;
@@ -5145,5 +5070,8 @@ out_close:
fclose(trace.output);
out:
trace__exit(&trace);
+#ifdef HAVE_BPF_SKEL
+ augmented_raw_syscalls_bpf__destroy(trace.skel);
+#endif
return err;
}
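
The builtin-trace.c changes above replace the removed bpf-loader plumbing with a generated libbpf skeleton: __augmented_syscalls__ is a BPF_MAP_TYPE_PERF_EVENT_ARRAY that must be seeded with one bpf-output event file descriptor per CPU before the BPF side can usefully call bpf_perf_event_output(). A minimal sketch of that wiring outside of perf, with an illustrative helper name (wire_bpf_output is not part of this patch), could look like:

#include <bpf/bpf.h>        /* bpf_map_update_elem() */
#include <linux/bpf.h>      /* BPF_ANY */

/*
 * Seed a BPF_MAP_TYPE_PERF_EVENT_ARRAY with one perf event FD per CPU,
 * so that bpf_perf_event_output(..., BPF_F_CURRENT_CPU, ...) on the BPF
 * side lands in the ring buffer the tool mmapped for that CPU.
 */
static int wire_bpf_output(int map_fd, int nr_cpus, const int *event_fds)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		int err = bpf_map_update_elem(map_fd, &cpu, &event_fds[cpu], BPF_ANY);

		if (err)
			return err;
	}
	return 0;
}

perf does the equivalent above with bpf_map__update_elem() on trace->skel->maps.__augmented_syscalls__, walking the CPU map and per-CPU fd xyarray of the bpf-output evsel.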
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index a0f1d8adce60..4314c9197850 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -123,7 +123,7 @@ check () {
shift
- check_2 "tools/$file" "$file" $*
+ check_2 "tools/$file" "$file" "$@"
}
beauty_check () {
@@ -131,7 +131,7 @@ beauty_check () {
shift
- check_2 "tools/perf/trace/beauty/$file" "$file" $*
+ check_2 "tools/perf/trace/beauty/$file" "$file" "$@"
}
# Check if we have the kernel headers (tools/perf/../../include), else
@@ -183,7 +183,7 @@ done
check_2 tools/perf/util/hashmap.h tools/lib/bpf/hashmap.h
check_2 tools/perf/util/hashmap.c tools/lib/bpf/hashmap.c
-cd tools/perf
+cd tools/perf || exit
if [ ${#FAILURES[@]} -gt 0 ]
then
diff --git a/tools/perf/dlfilters/dlfilter-test-api-v0.c b/tools/perf/dlfilters/dlfilter-test-api-v0.c
index b1f51efd67d6..72f263d49121 100644
--- a/tools/perf/dlfilters/dlfilter-test-api-v0.c
+++ b/tools/perf/dlfilters/dlfilter-test-api-v0.c
@@ -254,6 +254,30 @@ static int check_addr_al(void *ctx)
return 0;
}
+static int check_address_al(void *ctx, const struct perf_dlfilter_sample *sample)
+{
+ struct perf_dlfilter_al address_al;
+ const struct perf_dlfilter_al *al;
+
+ al = perf_dlfilter_fns.resolve_ip(ctx);
+ if (!al)
+ return test_fail("resolve_ip() failed");
+
+ address_al.size = sizeof(address_al);
+ if (perf_dlfilter_fns.resolve_address(ctx, sample->ip, &address_al))
+ return test_fail("resolve_address() failed");
+
+ CHECK(address_al.sym && al->sym);
+ CHECK(!strcmp(address_al.sym, al->sym));
+ CHECK(address_al.addr == al->addr);
+ CHECK(address_al.sym_start == al->sym_start);
+ CHECK(address_al.sym_end == al->sym_end);
+ CHECK(address_al.dso && al->dso);
+ CHECK(!strcmp(address_al.dso, al->dso));
+
+ return 0;
+}
+
static int check_attr(void *ctx)
{
struct perf_event_attr *attr = perf_dlfilter_fns.attr(ctx);
@@ -290,7 +314,7 @@ static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void
if (early && !d->do_early)
return 0;
- if (check_al(ctx) || check_addr_al(ctx))
+ if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
return -1;
if (early)
diff --git a/tools/perf/dlfilters/dlfilter-test-api-v2.c b/tools/perf/dlfilters/dlfilter-test-api-v2.c
new file mode 100644
index 000000000000..38e593d92920
--- /dev/null
+++ b/tools/perf/dlfilters/dlfilter-test-api-v2.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test v2 API for perf --dlfilter shared object
+ * Copyright (c) 2023, Intel Corporation.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+
+/*
+ * Copy v2 API instead of including current API
+ */
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+/*
+ * The following macro can be used to determine if this header defines
+ * perf_dlfilter_sample machine_pid and vcpu.
+ */
+#define PERF_DLFILTER_HAS_MACHINE_PID
+
+/* Definitions for perf_dlfilter_sample flags */
+enum {
+ PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
+ PERF_DLFILTER_FLAG_CALL = 1ULL << 1,
+ PERF_DLFILTER_FLAG_RETURN = 1ULL << 2,
+ PERF_DLFILTER_FLAG_CONDITIONAL = 1ULL << 3,
+ PERF_DLFILTER_FLAG_SYSCALLRET = 1ULL << 4,
+ PERF_DLFILTER_FLAG_ASYNC = 1ULL << 5,
+ PERF_DLFILTER_FLAG_INTERRUPT = 1ULL << 6,
+ PERF_DLFILTER_FLAG_TX_ABORT = 1ULL << 7,
+ PERF_DLFILTER_FLAG_TRACE_BEGIN = 1ULL << 8,
+ PERF_DLFILTER_FLAG_TRACE_END = 1ULL << 9,
+ PERF_DLFILTER_FLAG_IN_TX = 1ULL << 10,
+ PERF_DLFILTER_FLAG_VMENTRY = 1ULL << 11,
+ PERF_DLFILTER_FLAG_VMEXIT = 1ULL << 12,
+};
+
+/*
+ * perf sample event information (as per perf script and <linux/perf_event.h>)
+ */
+struct perf_dlfilter_sample {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u16 ins_lat; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u16 p_stage_cyc; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 ip;
+ __s32 pid;
+ __s32 tid;
+ __u64 time;
+ __u64 addr;
+ __u64 id;
+ __u64 stream_id;
+ __u64 period;
+ __u64 weight; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 transaction; /* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
+ __u64 insn_cnt; /* For instructions-per-cycle (IPC) */
+ __u64 cyc_cnt; /* For instructions-per-cycle (IPC) */
+ __s32 cpu;
+ __u32 flags; /* Refer PERF_DLFILTER_FLAG_* above */
+ __u64 data_src; /* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
+ __u64 phys_addr; /* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
+ __u64 data_page_size; /* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 code_page_size; /* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 cgroup; /* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
+ __u8 cpumode; /* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
+ __u8 addr_correlates_sym; /* True => resolve_addr() can be called */
+ __u16 misc; /* Refer perf_event_header in <linux/perf_event.h> */
+ __u32 raw_size; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ const void *raw_data; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ __u64 brstack_nr; /* Number of brstack entries */
+ const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
+ __u64 raw_callchain_nr; /* Number of raw_callchain entries */
+ const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
+ const char *event;
+ __s32 machine_pid;
+ __s32 vcpu;
+};
+
+/*
+ * Address location (as per perf script)
+ */
+struct perf_dlfilter_al {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u32 symoff;
+ const char *sym;
+ __u64 addr; /* Mapped address (from dso) */
+ __u64 sym_start;
+ __u64 sym_end;
+ const char *dso;
+ __u8 sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
+ __u8 is_64_bit; /* Only valid if dso is not NULL */
+ __u8 is_kernel_ip; /* True if in kernel space */
+ __u32 buildid_size;
+ __u8 *buildid;
+ /* Below members are only populated by resolve_ip() */
+ __u8 filtered; /* True if this sample event will be filtered out */
+ const char *comm;
+ void *priv; /* Private data (v2 API) */
+};
+
+struct perf_dlfilter_fns {
+ /* Return information about ip */
+ const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
+ /* Return information about addr (if addr_correlates_sym) */
+ const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
+ /* Return arguments from --dlarg option */
+ char **(*args)(void *ctx, int *dlargc);
+ /*
+ * Return information about address (al->size must be set before
+ * calling). Returns 0 on success, -1 otherwise. Call al_cleanup()
+ * when 'al' data is no longer needed.
+ */
+ __s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
+ /* Return instruction bytes and length */
+ const __u8 *(*insn)(void *ctx, __u32 *length);
+ /* Return source file name and line number */
+ const char *(*srcline)(void *ctx, __u32 *line_number);
+ /* Return perf_event_attr, refer <linux/perf_event.h> */
+ struct perf_event_attr *(*attr)(void *ctx);
+ /* Read object code, return numbers of bytes read */
+ __s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
+ /*
+ * If present (i.e. must check al_cleanup != NULL), call after
+ * resolve_address() to free any associated resources. (v2 API)
+ */
+ void (*al_cleanup)(void *ctx, struct perf_dlfilter_al *al);
+ /* Reserved */
+ void *(*reserved[119])(void *);
+};
+
+struct perf_dlfilter_fns perf_dlfilter_fns;
+
+static int verbose;
+
+#define pr_debug(fmt, ...) do { \
+ if (verbose > 0) \
+ fprintf(stderr, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+static int test_fail(const char *msg)
+{
+ pr_debug("%s\n", msg);
+ return -1;
+}
+
+#define CHECK(x) do { \
+ if (!(x)) \
+ return test_fail("Check '" #x "' failed\n"); \
+ } while (0)
+
+struct filter_data {
+ __u64 ip;
+ __u64 addr;
+ int do_early;
+ int early_filter_cnt;
+ int filter_cnt;
+};
+
+static struct filter_data *filt_dat;
+
+int start(void **data, void *ctx)
+{
+ int dlargc;
+ char **dlargv;
+ struct filter_data *d;
+ static bool called;
+
+ verbose = 1;
+
+ CHECK(!filt_dat && !called);
+ called = true;
+
+ d = calloc(1, sizeof(*d));
+ if (!d)
+ test_fail("Failed to allocate memory");
+ filt_dat = d;
+ *data = d;
+
+ dlargv = perf_dlfilter_fns.args(ctx, &dlargc);
+
+ CHECK(dlargc == 6);
+ CHECK(!strcmp(dlargv[0], "first"));
+ verbose = strtol(dlargv[1], NULL, 0);
+ d->ip = strtoull(dlargv[2], NULL, 0);
+ d->addr = strtoull(dlargv[3], NULL, 0);
+ d->do_early = strtol(dlargv[4], NULL, 0);
+ CHECK(!strcmp(dlargv[5], "last"));
+
+ pr_debug("%s API\n", __func__);
+
+ return 0;
+}
+
+#define CHECK_SAMPLE(x) do { \
+ if (sample->x != expected.x) \
+ return test_fail("'" #x "' not expected value\n"); \
+ } while (0)
+
+static int check_sample(struct filter_data *d, const struct perf_dlfilter_sample *sample)
+{
+ struct perf_dlfilter_sample expected = {
+ .ip = d->ip,
+ .pid = 12345,
+ .tid = 12346,
+ .time = 1234567890,
+ .addr = d->addr,
+ .id = 99,
+ .stream_id = 101,
+ .period = 543212345,
+ .cpu = 31,
+ .cpumode = PERF_RECORD_MISC_USER,
+ .addr_correlates_sym = 1,
+ .misc = PERF_RECORD_MISC_USER,
+ };
+
+ CHECK(sample->size >= sizeof(struct perf_dlfilter_sample));
+
+ CHECK_SAMPLE(ip);
+ CHECK_SAMPLE(pid);
+ CHECK_SAMPLE(tid);
+ CHECK_SAMPLE(time);
+ CHECK_SAMPLE(addr);
+ CHECK_SAMPLE(id);
+ CHECK_SAMPLE(stream_id);
+ CHECK_SAMPLE(period);
+ CHECK_SAMPLE(cpu);
+ CHECK_SAMPLE(cpumode);
+ CHECK_SAMPLE(addr_correlates_sym);
+ CHECK_SAMPLE(misc);
+
+ CHECK(!sample->raw_data);
+ CHECK_SAMPLE(brstack_nr);
+ CHECK(!sample->brstack);
+ CHECK_SAMPLE(raw_callchain_nr);
+ CHECK(!sample->raw_callchain);
+
+#define EVENT_NAME "branches:"
+ CHECK(!strncmp(sample->event, EVENT_NAME, strlen(EVENT_NAME)));
+
+ return 0;
+}
+
+static int check_al(void *ctx)
+{
+ const struct perf_dlfilter_al *al;
+
+ al = perf_dlfilter_fns.resolve_ip(ctx);
+ if (!al)
+ return test_fail("resolve_ip() failed");
+
+ CHECK(al->sym && !strcmp("foo", al->sym));
+ CHECK(!al->symoff);
+
+ return 0;
+}
+
+static int check_addr_al(void *ctx)
+{
+ const struct perf_dlfilter_al *addr_al;
+
+ addr_al = perf_dlfilter_fns.resolve_addr(ctx);
+ if (!addr_al)
+ return test_fail("resolve_addr() failed");
+
+ CHECK(addr_al->sym && !strcmp("bar", addr_al->sym));
+ CHECK(!addr_al->symoff);
+
+ return 0;
+}
+
+static int check_address_al(void *ctx, const struct perf_dlfilter_sample *sample)
+{
+ struct perf_dlfilter_al address_al;
+ const struct perf_dlfilter_al *al;
+
+ al = perf_dlfilter_fns.resolve_ip(ctx);
+ if (!al)
+ return test_fail("resolve_ip() failed");
+
+ address_al.size = sizeof(address_al);
+ if (perf_dlfilter_fns.resolve_address(ctx, sample->ip, &address_al))
+ return test_fail("resolve_address() failed");
+
+ CHECK(address_al.sym && al->sym);
+ CHECK(!strcmp(address_al.sym, al->sym));
+ CHECK(address_al.addr == al->addr);
+ CHECK(address_al.sym_start == al->sym_start);
+ CHECK(address_al.sym_end == al->sym_end);
+ CHECK(address_al.dso && al->dso);
+ CHECK(!strcmp(address_al.dso, al->dso));
+
+ /* al_cleanup() is v2 API so may not be present */
+ if (perf_dlfilter_fns.al_cleanup)
+ perf_dlfilter_fns.al_cleanup(ctx, &address_al);
+
+ return 0;
+}
+
+static int check_attr(void *ctx)
+{
+ struct perf_event_attr *attr = perf_dlfilter_fns.attr(ctx);
+
+ CHECK(attr);
+ CHECK(attr->type == PERF_TYPE_HARDWARE);
+ CHECK(attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+
+ return 0;
+}
+
+static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
+{
+ struct filter_data *d = data;
+
+ CHECK(data && filt_dat == data);
+
+ if (early) {
+ CHECK(!d->early_filter_cnt);
+ d->early_filter_cnt += 1;
+ } else {
+ CHECK(!d->filter_cnt);
+ CHECK(d->early_filter_cnt);
+ CHECK(d->do_early != 2);
+ d->filter_cnt += 1;
+ }
+
+ if (check_sample(data, sample))
+ return -1;
+
+ if (check_attr(ctx))
+ return -1;
+
+ if (early && !d->do_early)
+ return 0;
+
+ if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
+ return -1;
+
+ if (early)
+ return d->do_early == 2;
+
+ return 1;
+}
+
+int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ pr_debug("%s API\n", __func__);
+
+ return do_checks(data, sample, ctx, true);
+}
+
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ pr_debug("%s API\n", __func__);
+
+ return do_checks(data, sample, ctx, false);
+}
+
+int stop(void *data, void *ctx)
+{
+ static bool called;
+
+ pr_debug("%s API\n", __func__);
+
+ CHECK(data && filt_dat == data && !called);
+ called = true;
+
+ free(data);
+ filt_dat = NULL;
+ return 0;
+}
+
+const char *filter_description(const char **long_description)
+{
+ *long_description = "Filter used by the 'dlfilter C API' perf test";
+ return "dlfilter to test v2 C API";
+}
diff --git a/tools/perf/examples/bpf/5sec.c b/tools/perf/examples/bpf/5sec.c
deleted file mode 100644
index 3bd7fc17631f..000000000000
--- a/tools/perf/examples/bpf/5sec.c
+++ /dev/null
@@ -1,53 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- Description:
-
- . Disable strace like syscall tracing (--no-syscalls), or try tracing
- just some (-e *sleep).
-
- . Attach a filter function to a kernel function, returning when it should
- be considered, i.e. appear on the output.
-
- . Run it system wide, so that any sleep of >= 5 seconds and < than 6
- seconds gets caught.
-
- . Ask for callgraphs using DWARF info, so that userspace can be unwound
-
- . While this is running, run something like "sleep 5s".
-
- . If we decide to add tv_nsec as well, then it becomes:
-
- int probe(hrtimer_nanosleep, rqtp->tv_sec rqtp->tv_nsec)(void *ctx, int err, long sec, long nsec)
-
- I.e. add where it comes from (rqtp->tv_nsec) and where it will be
- accessible in the function body (nsec)
-
- # perf trace --no-syscalls -e tools/perf/examples/bpf/5sec.c/call-graph=dwarf/
- 0.000 perf_bpf_probe:func:(ffffffff9811b5f0) tv_sec=5
- hrtimer_nanosleep ([kernel.kallsyms])
- __x64_sys_nanosleep ([kernel.kallsyms])
- do_syscall_64 ([kernel.kallsyms])
- entry_SYSCALL_64 ([kernel.kallsyms])
- __GI___nanosleep (/usr/lib64/libc-2.26.so)
- rpl_nanosleep (/usr/bin/sleep)
- xnanosleep (/usr/bin/sleep)
- main (/usr/bin/sleep)
- __libc_start_main (/usr/lib64/libc-2.26.so)
- _start (/usr/bin/sleep)
- ^C#
-
- Copyright (C) 2018 Red Hat, Inc., Arnaldo Carvalho de Melo <acme@redhat.com>
-*/
-
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-#define NSEC_PER_SEC 1000000000L
-
-SEC("hrtimer_nanosleep=hrtimer_nanosleep rqtp")
-int hrtimer_nanosleep(void *ctx, int err, long long sec)
-{
- return sec / NSEC_PER_SEC == 5ULL;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/perf/examples/bpf/empty.c b/tools/perf/examples/bpf/empty.c
deleted file mode 100644
index 3e296c0c53d7..000000000000
--- a/tools/perf/examples/bpf/empty.c
+++ /dev/null
@@ -1,12 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-struct syscall_enter_args;
-
-SEC("raw_syscalls:sys_enter")
-int sys_enter(struct syscall_enter_args *args)
-{
- return 0;
-}
-char _license[] SEC("license") = "GPL";
diff --git a/tools/perf/examples/bpf/hello.c b/tools/perf/examples/bpf/hello.c
deleted file mode 100644
index e9080b0df158..000000000000
--- a/tools/perf/examples/bpf/hello.c
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-struct __bpf_stdout__ {
- __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __type(key, int);
- __type(value, __u32);
- __uint(max_entries, __NR_CPUS__);
-} __bpf_stdout__ SEC(".maps");
-
-#define puts(from) \
- ({ const int __len = sizeof(from); \
- char __from[sizeof(from)] = from; \
- bpf_perf_event_output(args, &__bpf_stdout__, BPF_F_CURRENT_CPU, \
- &__from, __len & (sizeof(from) - 1)); })
-
-struct syscall_enter_args;
-
-SEC("raw_syscalls:sys_enter")
-int sys_enter(struct syscall_enter_args *args)
-{
- puts("Hello, world\n");
- return 0;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/perf/examples/bpf/sys_enter_openat.c b/tools/perf/examples/bpf/sys_enter_openat.c
deleted file mode 100644
index c4481c390d23..000000000000
--- a/tools/perf/examples/bpf/sys_enter_openat.c
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Hook into 'openat' syscall entry tracepoint
- *
- * Test it with:
- *
- * perf trace -e tools/perf/examples/bpf/sys_enter_openat.c cat /etc/passwd > /dev/null
- *
- * It'll catch some openat syscalls related to the dynamic linked and
- * the last one should be the one for '/etc/passwd'.
- *
- * The syscall_enter_openat_args can be used to get the syscall fields
- * and use them for filtering calls, i.e. use in expressions for
- * the return value.
- */
-
-#include <bpf/bpf.h>
-
-struct syscall_enter_openat_args {
- unsigned long long unused;
- long syscall_nr;
- long dfd;
- char *filename_ptr;
- long flags;
- long mode;
-};
-
-int syscall_enter(openat)(struct syscall_enter_openat_args *args)
-{
- return 1;
-}
-
-license(GPL);
diff --git a/tools/perf/include/perf/perf_dlfilter.h b/tools/perf/include/perf/perf_dlfilter.h
index a26e2f129f83..16fc4568ac53 100644
--- a/tools/perf/include/perf/perf_dlfilter.h
+++ b/tools/perf/include/perf/perf_dlfilter.h
@@ -91,6 +91,7 @@ struct perf_dlfilter_al {
/* Below members are only populated by resolve_ip() */
__u8 filtered; /* True if this sample event will be filtered out */
const char *comm;
+ void *priv; /* Private data. Do not change */
};
struct perf_dlfilter_fns {
@@ -102,7 +103,8 @@ struct perf_dlfilter_fns {
char **(*args)(void *ctx, int *dlargc);
/*
* Return information about address (al->size must be set before
- * calling). Returns 0 on success, -1 otherwise.
+ * calling). Returns 0 on success, -1 otherwise. Call al_cleanup()
+ * when 'al' data is no longer needed.
*/
__s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
/* Return instruction bytes and length */
@@ -113,8 +115,13 @@ struct perf_dlfilter_fns {
struct perf_event_attr *(*attr)(void *ctx);
/* Read object code, return numbers of bytes read */
__s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
+ /*
+ * If present (i.e. must check al_cleanup != NULL), call after
+ * resolve_address() to free any associated resources.
+ */
+ void (*al_cleanup)(void *ctx, struct perf_dlfilter_al *al);
/* Reserved */
- void *(*reserved[120])(void *);
+ void *(*reserved[119])(void *);
};
/*
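
The perf_dlfilter.h additions spell out the v2 contract that the new test filter exercises: al->size must be set before resolve_address(), and al_cleanup(), being new in v2, must be NULL-checked so a v2 filter still loads against an older perf. A sketch of a filter written against this header (error handling trimmed, symbol inspection left as a placeholder; this is not code from the patch):

// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <perf/perf_dlfilter.h>

struct perf_dlfilter_fns perf_dlfilter_fns;	/* populated by perf at load time */

int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
	struct perf_dlfilter_al al;

	memset(&al, 0, sizeof(al));
	al.size = sizeof(al);			/* required before resolve_address() */

	if (perf_dlfilter_fns.resolve_address(ctx, sample->ip, &al))
		return 0;			/* keep the sample if the address cannot be resolved */

	/* ... inspect al.sym / al.dso / al.addr here ... */

	/* al_cleanup() only exists from v2 on, so check before calling */
	if (perf_dlfilter_fns.al_cleanup)
		perf_dlfilter_fns.al_cleanup(ctx, &al);

	return 0;				/* 0 keeps the sample, non-zero filters it out */
}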
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 38cae4721583..d3fc8090413c 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -18,7 +18,6 @@
#include <subcmd/run-command.h>
#include "util/parse-events.h"
#include <subcmd/parse-options.h>
-#include "util/bpf-loader.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/util.h" // usage()
@@ -324,7 +323,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
perf_config__exit();
exit_browser(status);
perf_env__exit(&perf_env);
- bpf__clear();
if (status)
return status & 0xff;
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 150765f2baee..1d18bb89402e 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -35,3 +35,9 @@ $(PMU_EVENTS_C): $(JSON) $(JSON_TEST) $(JEVENTS_PY) $(METRIC_PY) $(METRIC_TEST_L
$(call rule_mkdir)
$(Q)$(call echo-cmd,gen)$(PYTHON) $(JEVENTS_PY) $(JEVENTS_ARCH) $(JEVENTS_MODEL) pmu-events/arch $@
endif
+
+# pmu-events.c file is generated in the OUTPUT directory so it needs a
+# separate rule to depend on it properly
+$(OUTPUT)pmu-events/pmu-events.o: $(PMU_EVENTS_C)
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json
index fc0633054211..7a2b7b200f14 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/cache.json
@@ -93,9 +93,6 @@
"ArchStdEvent": "L1D_CACHE_LMISS_RD"
},
{
- "ArchStdEvent": "L1D_CACHE_LMISS"
- },
- {
"ArchStdEvent": "L1I_CACHE_LMISS"
},
{
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
index 95c30243f2b2..88b23b85e33c 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
@@ -534,66 +534,6 @@
"BriefDescription": "L2D OTB allocate"
},
{
- "PublicDescription": "DTLB Translation cache hit on S1L2 walk cache entry",
- "EventCode": "0xD801",
- "EventName": "MMU_D_TRANS_CACHE_HIT_S1L2_WALK",
- "BriefDescription": "DTLB Translation cache hit on S1L2 walk cache entry"
- },
- {
- "PublicDescription": "DTLB Translation cache hit on S1L1 walk cache entry",
- "EventCode": "0xD802",
- "EventName": "MMU_D_TRANS_CACHE_HIT_S1L1_WALK",
- "BriefDescription": "DTLB Translation cache hit on S1L1 walk cache entry"
- },
- {
- "PublicDescription": "DTLB Translation cache hit on S1L0 walk cache entry",
- "EventCode": "0xD803",
- "EventName": "MMU_D_TRANS_CACHE_HIT_S1L0_WALK",
- "BriefDescription": "DTLB Translation cache hit on S1L0 walk cache entry"
- },
- {
- "PublicDescription": "DTLB Translation cache hit on S2L2 walk cache entry",
- "EventCode": "0xD804",
- "EventName": "MMU_D_TRANS_CACHE_HIT_S2L2_WALK",
- "BriefDescription": "DTLB Translation cache hit on S2L2 walk cache entry"
- },
- {
- "PublicDescription": "DTLB Translation cache hit on S2L1 walk cache entry",
- "EventCode": "0xD805",
- "EventName": "MMU_D_TRANS_CACHE_HIT_S2L1_WALK",
- "BriefDescription": "DTLB Translation cache hit on S2L1 walk cache entry"
- },
- {
- "PublicDescription": "DTLB Translation cache hit on S2L0 walk cache entry",
- "EventCode": "0xD806",
- "EventName": "MMU_D_TRANS_CACHE_HIT_S2L0_WALK",
- "BriefDescription": "DTLB Translation cache hit on S2L0 walk cache entry"
- },
- {
- "PublicDescription": "D-side S1 Page walk cache lookup",
- "EventCode": "0xD807",
- "EventName": "MMU_D_S1_WALK_CACHE_LOOKUP",
- "BriefDescription": "D-side S1 Page walk cache lookup"
- },
- {
- "PublicDescription": "D-side S1 Page walk cache refill",
- "EventCode": "0xD808",
- "EventName": "MMU_D_S1_WALK_CACHE_REFILL",
- "BriefDescription": "D-side S1 Page walk cache refill"
- },
- {
- "PublicDescription": "D-side S2 Page walk cache lookup",
- "EventCode": "0xD809",
- "EventName": "MMU_D_S2_WALK_CACHE_LOOKUP",
- "BriefDescription": "D-side S2 Page walk cache lookup"
- },
- {
- "PublicDescription": "D-side S2 Page walk cache refill",
- "EventCode": "0xD80A",
- "EventName": "MMU_D_S2_WALK_CACHE_REFILL",
- "BriefDescription": "D-side S2 Page walk cache refill"
- },
- {
"PublicDescription": "D-side Stage1 tablewalk fault",
"EventCode": "0xD80B",
"EventName": "MMU_D_S1_WALK_FAULT",
@@ -618,66 +558,6 @@
"BriefDescription": "L2I OTB allocate"
},
{
- "PublicDescription": "ITLB Translation cache hit on S1L2 walk cache entry",
- "EventCode": "0xD901",
- "EventName": "MMU_I_TRANS_CACHE_HIT_S1L2_WALK",
- "BriefDescription": "ITLB Translation cache hit on S1L2 walk cache entry"
- },
- {
- "PublicDescription": "ITLB Translation cache hit on S1L1 walk cache entry",
- "EventCode": "0xD902",
- "EventName": "MMU_I_TRANS_CACHE_HIT_S1L1_WALK",
- "BriefDescription": "ITLB Translation cache hit on S1L1 walk cache entry"
- },
- {
- "PublicDescription": "ITLB Translation cache hit on S1L0 walk cache entry",
- "EventCode": "0xD903",
- "EventName": "MMU_I_TRANS_CACHE_HIT_S1L0_WALK",
- "BriefDescription": "ITLB Translation cache hit on S1L0 walk cache entry"
- },
- {
- "PublicDescription": "ITLB Translation cache hit on S2L2 walk cache entry",
- "EventCode": "0xD904",
- "EventName": "MMU_I_TRANS_CACHE_HIT_S2L2_WALK",
- "BriefDescription": "ITLB Translation cache hit on S2L2 walk cache entry"
- },
- {
- "PublicDescription": "ITLB Translation cache hit on S2L1 walk cache entry",
- "EventCode": "0xD905",
- "EventName": "MMU_I_TRANS_CACHE_HIT_S2L1_WALK",
- "BriefDescription": "ITLB Translation cache hit on S2L1 walk cache entry"
- },
- {
- "PublicDescription": "ITLB Translation cache hit on S2L0 walk cache entry",
- "EventCode": "0xD906",
- "EventName": "MMU_I_TRANS_CACHE_HIT_S2L0_WALK",
- "BriefDescription": "ITLB Translation cache hit on S2L0 walk cache entry"
- },
- {
- "PublicDescription": "I-side S1 Page walk cache lookup",
- "EventCode": "0xD907",
- "EventName": "MMU_I_S1_WALK_CACHE_LOOKUP",
- "BriefDescription": "I-side S1 Page walk cache lookup"
- },
- {
- "PublicDescription": "I-side S1 Page walk cache refill",
- "EventCode": "0xD908",
- "EventName": "MMU_I_S1_WALK_CACHE_REFILL",
- "BriefDescription": "I-side S1 Page walk cache refill"
- },
- {
- "PublicDescription": "I-side S2 Page walk cache lookup",
- "EventCode": "0xD909",
- "EventName": "MMU_I_S2_WALK_CACHE_LOOKUP",
- "BriefDescription": "I-side S2 Page walk cache lookup"
- },
- {
- "PublicDescription": "I-side S2 Page walk cache refill",
- "EventCode": "0xD90A",
- "EventName": "MMU_I_S2_WALK_CACHE_REFILL",
- "BriefDescription": "I-side S2 Page walk cache refill"
- },
- {
"PublicDescription": "I-side Stage1 tablewalk fault",
"EventCode": "0xD90B",
"EventName": "MMU_I_S1_WALK_FAULT",
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
new file mode 100644
index 000000000000..1e7e8901a445
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
@@ -0,0 +1,362 @@
+[
+ {
+ "MetricExpr": "BR_MIS_PRED / BR_PRED",
+ "BriefDescription": "Branch predictor misprediction rate. May not count branches that are never resolved because they are in the misprediction shadow of an earlier branch",
+ "MetricGroup": "Branch Prediction",
+ "MetricName": "Misprediction"
+ },
+ {
+ "MetricExpr": "BR_MIS_PRED_RETIRED / BR_RETIRED",
+ "BriefDescription": "Branch predictor misprediction rate",
+ "MetricGroup": "Branch Prediction",
+ "MetricName": "Misprediction (retired)"
+ },
+ {
+ "MetricExpr": "BUS_ACCESS / ( BUS_CYCLES * 1)",
+ "BriefDescription": "Core-to-uncore bus utilization",
+ "MetricGroup": "Bus",
+ "MetricName": "Bus utilization"
+ },
+ {
+ "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
+ "BriefDescription": "L1D cache miss rate",
+ "MetricGroup": "Cache",
+ "MetricName": "L1D cache miss"
+ },
+ {
+ "MetricExpr": "L1D_CACHE_LMISS_RD / L1D_CACHE_RD",
+ "BriefDescription": "L1D cache read miss rate",
+ "MetricGroup": "Cache",
+ "MetricName": "L1D cache read miss"
+ },
+ {
+ "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
+ "BriefDescription": "L1I cache miss rate",
+ "MetricGroup": "Cache",
+ "MetricName": "L1I cache miss"
+ },
+ {
+ "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
+ "BriefDescription": "L2 cache miss rate",
+ "MetricGroup": "Cache",
+ "MetricName": "L2 cache miss"
+ },
+ {
+ "MetricExpr": "L1I_CACHE_LMISS / L1I_CACHE",
+ "BriefDescription": "L1I cache read miss rate",
+ "MetricGroup": "Cache",
+ "MetricName": "L1I cache read miss"
+ },
+ {
+ "MetricExpr": "L2D_CACHE_LMISS_RD / L2D_CACHE_RD",
+ "BriefDescription": "L2 cache read miss rate",
+ "MetricGroup": "Cache",
+ "MetricName": "L2 cache read miss"
+ },
+ {
+ "MetricExpr": "(L1D_CACHE_LMISS_RD * 1000) / INST_RETIRED",
+ "BriefDescription": "Misses per thousand instructions (data)",
+ "MetricGroup": "Cache",
+ "MetricName": "MPKI data"
+ },
+ {
+ "MetricExpr": "(L1I_CACHE_LMISS * 1000) / INST_RETIRED",
+ "BriefDescription": "Misses per thousand instructions (instruction)",
+ "MetricGroup": "Cache",
+ "MetricName": "MPKI instruction"
+ },
+ {
+ "MetricExpr": "ASE_SPEC / OP_SPEC",
+ "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) operations",
+ "MetricGroup": "Instruction",
+ "MetricName": "ASE mix"
+ },
+ {
+ "MetricExpr": "CRYPTO_SPEC / OP_SPEC",
+ "BriefDescription": "Proportion of crypto data processing operations",
+ "MetricGroup": "Instruction",
+ "MetricName": "Crypto mix"
+ },
+ {
+ "MetricExpr": "VFP_SPEC / (duration_time *1000000000)",
+ "BriefDescription": "Giga-floating point operations per second",
+ "MetricGroup": "Instruction",
+ "MetricName": "GFLOPS_ISSUED"
+ },
+ {
+ "MetricExpr": "DP_SPEC / OP_SPEC",
+ "BriefDescription": "Proportion of integer data processing operations",
+ "MetricGroup": "Instruction",
+ "MetricName": "Integer mix"
+ },
+ {
+ "MetricExpr": "INST_RETIRED / CPU_CYCLES",
+ "BriefDescription": "Instructions per cycle",
+ "MetricGroup": "Instruction",
+ "MetricName": "IPC"
+ },
+ {
+ "MetricExpr": "LD_SPEC / OP_SPEC",
+ "BriefDescription": "Proportion of load operations",
+ "MetricGroup": "Instruction",
+ "MetricName": "Load mix"
+ },
+ {
+ "MetricExpr": "LDST_SPEC/ OP_SPEC",
+ "BriefDescription": "Proportion of load & store operations",
+ "MetricGroup": "Instruction",
+ "MetricName": "Load-store mix"
+ },
+ {
+ "MetricExpr": "INST_RETIRED / (duration_time * 1000000)",
+ "BriefDescription": "Millions of instructions per second",
+ "MetricGroup": "Instruction",
+ "MetricName": "MIPS_RETIRED"
+ },
+ {
+ "MetricExpr": "INST_SPEC / (duration_time * 1000000)",
+ "BriefDescription": "Millions of instructions per second",
+ "MetricGroup": "Instruction",
+ "MetricName": "MIPS_UTILIZATION"
+ },
+ {
+ "MetricExpr": "PC_WRITE_SPEC / OP_SPEC",
+ "BriefDescription": "Proportion of software change of PC operations",
+ "MetricGroup": "Instruction",
+ "MetricName": "PC write mix"
+ },
+ {
+ "MetricExpr": "ST_SPEC / OP_SPEC",
+ "BriefDescription": "Proportion of store operations",
+ "MetricGroup": "Instruction",
+ "MetricName": "Store mix"
+ },
+ {
+ "MetricExpr": "VFP_SPEC / OP_SPEC",
+ "BriefDescription": "Proportion of FP operations",
+ "MetricGroup": "Instruction",
+ "MetricName": "VFP mix"
+ },
+ {
+ "MetricExpr": "1 - (OP_RETIRED/ (CPU_CYCLES * 4))",
+ "BriefDescription": "Proportion of slots lost",
+ "MetricGroup": "Speculation / TDA",
+ "MetricName": "CPU lost"
+ },
+ {
+ "MetricExpr": "OP_RETIRED/ (CPU_CYCLES * 4)",
+ "BriefDescription": "Proportion of slots retiring",
+ "MetricGroup": "Speculation / TDA",
+ "MetricName": "CPU utilization"
+ },
+ {
+ "MetricExpr": "OP_RETIRED - OP_SPEC",
+ "BriefDescription": "Operations lost due to misspeculation",
+ "MetricGroup": "Speculation / TDA",
+ "MetricName": "Operations lost"
+ },
+ {
+ "MetricExpr": "1 - (OP_RETIRED / OP_SPEC)",
+ "BriefDescription": "Proportion of operations lost",
+ "MetricGroup": "Speculation / TDA",
+ "MetricName": "Operations lost (ratio)"
+ },
+ {
+ "MetricExpr": "OP_RETIRED / OP_SPEC",
+ "BriefDescription": "Proportion of operations retired",
+ "MetricGroup": "Speculation / TDA",
+ "MetricName": "Operations retired"
+ },
+ {
+ "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and cache miss",
+ "MetricGroup": "Stall",
+ "MetricName": "Stall backend cache cycles"
+ },
+ {
+ "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and resource full",
+ "MetricGroup": "Stall",
+ "MetricName": "Stall backend resource cycles"
+ },
+ {
+ "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and TLB miss",
+ "MetricGroup": "Stall",
+ "MetricName": "Stall backend tlb cycles"
+ },
+ {
+ "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
+ "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss",
+ "MetricGroup": "Stall",
+ "MetricName": "Stall frontend cache cycles"
+ },
+ {
+ "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
+ "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss",
+ "MetricGroup": "Stall",
+ "MetricName": "Stall frontend tlb cycles"
+ },
+ {
+ "MetricExpr": "DTLB_WALK / L1D_TLB",
+ "BriefDescription": "D-side walk per d-side translation request",
+ "MetricGroup": "TLB",
+ "MetricName": "DTLB walks"
+ },
+ {
+ "MetricExpr": "ITLB_WALK / L1I_TLB",
+ "BriefDescription": "I-side walk per i-side translation request",
+ "MetricGroup": "TLB",
+ "MetricName": "ITLB walks"
+ },
+ {
+ "MetricExpr": "STALL_SLOT_BACKEND / (CPU_CYCLES * 4)",
+ "BriefDescription": "Fraction of slots backend bound",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "backend"
+ },
+ {
+ "MetricExpr": "1 - (retiring + lost + backend)",
+ "BriefDescription": "Fraction of slots frontend bound",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "frontend"
+ },
+ {
+ "MetricExpr": "((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * 4))",
+ "BriefDescription": "Fraction of slots lost due to misspeculation",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "lost"
+ },
+ {
+ "MetricExpr": "(OP_RETIRED / (CPU_CYCLES * 4))",
+ "BriefDescription": "Fraction of slots retiring, useful work",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "retiring"
+ },
+ {
+ "MetricExpr": "backend - backend_memory",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to backend non-memory subsystem issues",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "backend_core"
+ },
+ {
+ "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE + STALL_BACKEND_MEM) / CPU_CYCLES ",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to backend memory subsystem issues (cache/tlb miss)",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "backend_memory"
+ },
+ {
+ "MetricExpr": " (BR_MIS_PRED_RETIRED / GPC_FLUSH) * lost",
+ "BriefDescription": "Fraction of slots lost due to branch misprediciton",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "branch_mispredict"
+ },
+ {
+ "MetricExpr": "frontend - frontend_latency",
+ "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "frontend_bandwidth"
+ },
+ {
+ "MetricExpr": "(STALL_FRONTEND - ((STALL_SLOT_FRONTEND - (frontend * CPU_CYCLES * 4)) / 4)) / CPU_CYCLES",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to frontend latency issues (cache/tlb miss); nothing to dispatch",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "frontend_latency"
+ },
+ {
+ "MetricExpr": "lost - branch_mispredict",
+ "BriefDescription": "Fraction of slots lost due to other/non-branch misprediction misspeculation",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "other_clears"
+ },
+ {
+ "MetricExpr": "(IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6)",
+ "BriefDescription": "Fraction of execute slots utilized",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "pipe_utilization"
+ },
+ {
+ "MetricExpr": "STALL_BACKEND_MEM / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data L2 cache miss",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "d_cache_l2_miss"
+ },
+ {
+ "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data cache miss",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "d_cache_miss"
+ },
+ {
+ "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data TLB miss",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "d_tlb_miss"
+ },
+ {
+ "MetricExpr": "FSU_ISSUED / (CPU_CYCLES * 2)",
+ "BriefDescription": "Fraction of FSU execute slots utilized",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "fsu_pipe_utilization"
+ },
+ {
+ "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction cache miss",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "i_cache_miss"
+ },
+ {
+ "MetricExpr": " STALL_FRONTEND_TLB / CPU_CYCLES ",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction TLB miss",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "i_tlb_miss"
+ },
+ {
+ "MetricExpr": "IXU_NUM_UOPS_ISSUED / (CPU_CYCLES / 4)",
+ "BriefDescription": "Fraction of IXU execute slots utilized",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "ixu_pipe_utilization"
+ },
+ {
+ "MetricExpr": "IDR_STALL_FLUSH / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to flush recovery",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "recovery"
+ },
+ {
+ "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to core resource shortage",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "resource"
+ },
+ {
+ "MetricExpr": "IDR_STALL_FSU_SCHED / CPU_CYCLES ",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and FSU was full",
+ "MetricGroup": "TopDownL4",
+ "MetricName": "stall_fsu_sched"
+ },
+ {
+ "MetricExpr": "IDR_STALL_IXU_SCHED / CPU_CYCLES ",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and IXU was full",
+ "MetricGroup": "TopDownL4",
+ "MetricName": "stall_ixu_sched"
+ },
+ {
+ "MetricExpr": "IDR_STALL_LOB_ID / CPU_CYCLES ",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and LOB was full",
+ "MetricGroup": "TopDownL4",
+ "MetricName": "stall_lob_id"
+ },
+ {
+ "MetricExpr": "IDR_STALL_ROB_ID / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and ROB was full",
+ "MetricGroup": "TopDownL4",
+ "MetricName": "stall_rob_id"
+ },
+ {
+ "MetricExpr": "IDR_STALL_SOB_ID / CPU_CYCLES ",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and SOB was full",
+ "MetricGroup": "TopDownL4",
+ "MetricName": "stall_sob_id"
+ }
+]
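
The TopDownL1 entries above implement the usual four-way slot decomposition; making the arithmetic explicit (the expressions assume 4 issue slots per cycle):

\[
\text{slots} = 4\,\text{CPU\_CYCLES},\qquad
\text{retiring} = \frac{\text{OP\_RETIRED}}{\text{slots}},\qquad
\text{lost} = \frac{\text{OP\_SPEC} - \text{OP\_RETIRED}}{\text{slots}},
\]
\[
\text{backend} = \frac{\text{STALL\_SLOT\_BACKEND}}{\text{slots}},\qquad
\text{frontend} = 1 - (\text{retiring} + \text{lost} + \text{backend}),
\]

so the four fractions partition the available issue slots and frontend is derived by subtraction rather than measured directly, which is consistent with STALL_SLOT_FRONTEND being flagged with erratum AC03_CPU_29 in pipeline.json below.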
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/pipeline.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/pipeline.json
index f9fae15f7555..711028377f3e 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/pipeline.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/pipeline.json
@@ -1,18 +1,24 @@
[
{
- "ArchStdEvent": "STALL_FRONTEND"
+ "ArchStdEvent": "STALL_FRONTEND",
+ "Errata": "Errata AC03_CPU_29",
+ "BriefDescription": "Impacted by errata, use metrics instead -"
},
{
"ArchStdEvent": "STALL_BACKEND"
},
{
- "ArchStdEvent": "STALL"
+ "ArchStdEvent": "STALL",
+ "Errata": "Errata AC03_CPU_29",
+ "BriefDescription": "Impacted by errata, use metrics instead -"
},
{
"ArchStdEvent": "STALL_SLOT_BACKEND"
},
{
- "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ "ArchStdEvent": "STALL_SLOT_FRONTEND",
+ "Errata": "Errata AC03_CPU_29",
+ "BriefDescription": "Impacted by errata, use metrics instead -"
},
{
"ArchStdEvent": "STALL_SLOT"
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/branch.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/branch.json
deleted file mode 100644
index 79f2016c53b0..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/branch.json
+++ /dev/null
@@ -1,8 +0,0 @@
-[
- {
- "ArchStdEvent": "BR_MIS_PRED"
- },
- {
- "ArchStdEvent": "BR_PRED"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/bus.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/bus.json
index 579c1c993d17..2e11a8c4a484 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/bus.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/bus.json
@@ -1,20 +1,18 @@
[
{
- "ArchStdEvent": "CPU_CYCLES"
+ "ArchStdEvent": "BUS_ACCESS",
+ "PublicDescription": "Counts memory transactions issued by the CPU to the external bus, including snoop requests and snoop responses. Each beat of data is counted individually."
},
{
- "ArchStdEvent": "BUS_ACCESS"
+ "ArchStdEvent": "BUS_CYCLES",
+ "PublicDescription": "Counts bus cycles in the CPU. Bus cycles represent a clock cycle in which a transaction could be sent or received on the interface from the CPU to the external bus. Since that interface is driven at the same clock speed as the CPU, this event is a duplicate of CPU_CYCLES."
},
{
- "ArchStdEvent": "BUS_CYCLES"
+ "ArchStdEvent": "BUS_ACCESS_RD",
+ "PublicDescription": "Counts memory read transactions seen on the external bus. Each beat of data is counted individually."
},
{
- "ArchStdEvent": "BUS_ACCESS_RD"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_WR"
- },
- {
- "ArchStdEvent": "CNT_CYCLES"
+ "ArchStdEvent": "BUS_ACCESS_WR",
+ "PublicDescription": "Counts memory write transactions seen on the external bus. Each beat of data is counted individually."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/cache.json
deleted file mode 100644
index 0141f749bff3..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/cache.json
+++ /dev/null
@@ -1,155 +0,0 @@
-[
- {
- "ArchStdEvent": "L1I_CACHE_REFILL"
- },
- {
- "ArchStdEvent": "L1I_TLB_REFILL"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL"
- },
- {
- "ArchStdEvent": "L1D_CACHE"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL"
- },
- {
- "ArchStdEvent": "L1I_CACHE"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB"
- },
- {
- "ArchStdEvent": "L2D_CACHE"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB"
- },
- {
- "ArchStdEvent": "L2D_CACHE_ALLOCATE"
- },
- {
- "ArchStdEvent": "L1D_TLB"
- },
- {
- "ArchStdEvent": "L1I_TLB"
- },
- {
- "ArchStdEvent": "L3D_CACHE_ALLOCATE"
- },
- {
- "ArchStdEvent": "L3D_CACHE_REFILL"
- },
- {
- "ArchStdEvent": "L3D_CACHE"
- },
- {
- "ArchStdEvent": "L2D_TLB_REFILL"
- },
- {
- "ArchStdEvent": "L2D_TLB"
- },
- {
- "ArchStdEvent": "DTLB_WALK"
- },
- {
- "ArchStdEvent": "ITLB_WALK"
- },
- {
- "ArchStdEvent": "LL_CACHE_RD"
- },
- {
- "ArchStdEvent": "LL_CACHE_MISS_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_LMISS_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WR"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_WR"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
- },
- {
- "ArchStdEvent": "L1D_CACHE_INVAL"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_RD"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_WR"
- },
- {
- "ArchStdEvent": "L1D_TLB_RD"
- },
- {
- "ArchStdEvent": "L1D_TLB_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL_RD"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
- },
- {
- "ArchStdEvent": "L2D_CACHE_INVAL"
- },
- {
- "ArchStdEvent": "L2D_TLB_REFILL_RD"
- },
- {
- "ArchStdEvent": "L2D_TLB_REFILL_WR"
- },
- {
- "ArchStdEvent": "L2D_TLB_RD"
- },
- {
- "ArchStdEvent": "L2D_TLB_WR"
- },
- {
- "ArchStdEvent": "L3D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L1I_CACHE_LMISS"
- },
- {
- "ArchStdEvent": "L2D_CACHE_LMISS_RD"
- },
- {
- "ArchStdEvent": "L3D_CACHE_LMISS_RD"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json
index 344a2d552ad5..4404b8e91690 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json
@@ -1,47 +1,62 @@
[
{
- "ArchStdEvent": "EXC_TAKEN"
+ "ArchStdEvent": "EXC_TAKEN",
+ "PublicDescription": "Counts any taken architecturally visible exceptions such as IRQ, FIQ, SError, and other synchronous exceptions. Exceptions are counted whether or not they are taken locally."
},
{
- "ArchStdEvent": "MEMORY_ERROR"
+ "ArchStdEvent": "EXC_RETURN",
+ "PublicDescription": "Counts any architecturally executed exception return instructions. Eg: AArch64: ERET"
},
{
- "ArchStdEvent": "EXC_UNDEF"
+ "ArchStdEvent": "EXC_UNDEF",
+ "PublicDescription": "Counts the number of synchronous exceptions which are taken locally that are due to attempting to execute an instruction that is UNDEFINED. Attempting to execute instruction bit patterns that have not been allocated. Attempting to execute instructions when they are disabled. Attempting to execute instructions at an inappropriate Exception level. Attempting to execute an instruction when the value of PSTATE.IL is 1."
},
{
- "ArchStdEvent": "EXC_SVC"
+ "ArchStdEvent": "EXC_SVC",
+ "PublicDescription": "Counts SVC exceptions taken locally."
},
{
- "ArchStdEvent": "EXC_PABORT"
+ "ArchStdEvent": "EXC_PABORT",
+ "PublicDescription": "Counts synchronous exceptions that are taken locally and caused by Instruction Aborts."
},
{
- "ArchStdEvent": "EXC_DABORT"
+ "ArchStdEvent": "EXC_DABORT",
+ "PublicDescription": "Counts exceptions that are taken locally and are caused by data aborts or SErrors. Conditions that could cause those exceptions are attempting to read or write memory where the MMU generates a fault, attempting to read or write memory with a misaligned address, interrupts from the nSEI inputs and internally generated SErrors."
},
{
- "ArchStdEvent": "EXC_IRQ"
+ "ArchStdEvent": "EXC_IRQ",
+ "PublicDescription": "Counts IRQ exceptions including the virtual IRQs that are taken locally."
},
{
- "ArchStdEvent": "EXC_FIQ"
+ "ArchStdEvent": "EXC_FIQ",
+ "PublicDescription": "Counts FIQ exceptions including the virtual FIQs that are taken locally."
},
{
- "ArchStdEvent": "EXC_SMC"
+ "ArchStdEvent": "EXC_SMC",
+        "PublicDescription": "Counts SMC exceptions taken to EL3."
},
{
- "ArchStdEvent": "EXC_HVC"
+ "ArchStdEvent": "EXC_HVC",
+ "PublicDescription": "Counts HVC exceptions taken to EL2."
},
{
- "ArchStdEvent": "EXC_TRAP_PABORT"
+ "ArchStdEvent": "EXC_TRAP_PABORT",
+ "PublicDescription": "Counts exceptions which are traps not taken locally and are caused by Instruction Aborts. For example, attempting to execute an instruction with a misaligned PC."
},
{
- "ArchStdEvent": "EXC_TRAP_DABORT"
+ "ArchStdEvent": "EXC_TRAP_DABORT",
+        "PublicDescription": "Counts exceptions which are traps not taken locally and are caused by Data Aborts or SError interrupts. Conditions that could cause those exceptions are:\n\n1. Attempting to read or write memory where the MMU generates a fault,\n2. Attempting to read or write memory with a misaligned address,\n3. Interrupts from the SEI input,\n4. Internally generated SErrors."
},
{
- "ArchStdEvent": "EXC_TRAP_OTHER"
+ "ArchStdEvent": "EXC_TRAP_OTHER",
+ "PublicDescription": "Counts the number of synchronous trap exceptions which are not taken locally and are not SVC, SMC, HVC, data aborts, Instruction Aborts, or interrupts."
},
{
- "ArchStdEvent": "EXC_TRAP_IRQ"
+ "ArchStdEvent": "EXC_TRAP_IRQ",
+ "PublicDescription": "Counts IRQ exceptions including the virtual IRQs that are not taken locally."
},
{
- "ArchStdEvent": "EXC_TRAP_FIQ"
+ "ArchStdEvent": "EXC_TRAP_FIQ",
+        "PublicDescription": "Counts FIQs which are not taken locally but taken from EL0, EL1, or EL2 to EL3 (which would be the normal behavior for FIQs when not executing in EL3)."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/fp_operation.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/fp_operation.json
new file mode 100644
index 000000000000..cec3435ac766
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/fp_operation.json
@@ -0,0 +1,22 @@
+[
+ {
+ "ArchStdEvent": "FP_HP_SPEC",
+ "PublicDescription": "Counts speculatively executed half precision floating point operations."
+ },
+ {
+ "ArchStdEvent": "FP_SP_SPEC",
+ "PublicDescription": "Counts speculatively executed single precision floating point operations."
+ },
+ {
+ "ArchStdEvent": "FP_DP_SPEC",
+ "PublicDescription": "Counts speculatively executed double precision floating point operations."
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC",
+ "PublicDescription": "Counts speculatively executed scalable single precision floating point operations."
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC",
+ "PublicDescription": "Counts speculatively executed non-scalable single precision floating point operations."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/general.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/general.json
new file mode 100644
index 000000000000..428810f855b8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/general.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES",
+ "PublicDescription": "Counts CPU clock cycles (not timer cycles). The clock measured by this event is defined as the physical clock driving the CPU logic."
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES",
+        "PublicDescription": "Counts constant frequency cycles."
+ }
+]
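
A quick illustration of how these two cycle counters are commonly combined (this sketch is not part of the patch): assuming CNT_CYCLES increments at the fixed system counter frequency (CNTFRQ_EL0), the ratio of CPU_CYCLES to CNT_CYCLES scales that fixed frequency up to an average CPU clock frequency over the measurement window. The helper name and sample values below are made up.

# Illustrative sketch only; assumes CNT_CYCLES ticks at the fixed system
# counter frequency. Sample numbers are invented.
def average_cpu_frequency_hz(cpu_cycles, cnt_cycles, counter_freq_hz):
    """Scale the fixed-frequency cycle count up to the CPU clock domain."""
    return cpu_cycles / cnt_cycles * counter_freq_hz

if __name__ == "__main__":
    freq = average_cpu_frequency_hz(cpu_cycles=2_600_000_000,
                                    cnt_cycles=100_000_000,
                                    counter_freq_hz=100_000_000)
    print(f"average CPU frequency: {freq / 1e9:.2f} GHz")
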
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/instruction.json
deleted file mode 100644
index e57cd55937c6..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/instruction.json
+++ /dev/null
@@ -1,143 +0,0 @@
-[
- {
- "ArchStdEvent": "SW_INCR"
- },
- {
- "ArchStdEvent": "INST_RETIRED"
- },
- {
- "ArchStdEvent": "EXC_RETURN"
- },
- {
- "ArchStdEvent": "CID_WRITE_RETIRED"
- },
- {
- "ArchStdEvent": "INST_SPEC"
- },
- {
- "ArchStdEvent": "TTBR_WRITE_RETIRED"
- },
- {
- "ArchStdEvent": "BR_RETIRED"
- },
- {
- "ArchStdEvent": "BR_MIS_PRED_RETIRED"
- },
- {
- "ArchStdEvent": "OP_RETIRED"
- },
- {
- "ArchStdEvent": "OP_SPEC"
- },
- {
- "ArchStdEvent": "LDREX_SPEC"
- },
- {
- "ArchStdEvent": "STREX_PASS_SPEC"
- },
- {
- "ArchStdEvent": "STREX_FAIL_SPEC"
- },
- {
- "ArchStdEvent": "STREX_SPEC"
- },
- {
- "ArchStdEvent": "LD_SPEC"
- },
- {
- "ArchStdEvent": "ST_SPEC"
- },
- {
- "ArchStdEvent": "DP_SPEC"
- },
- {
- "ArchStdEvent": "ASE_SPEC"
- },
- {
- "ArchStdEvent": "VFP_SPEC"
- },
- {
- "ArchStdEvent": "PC_WRITE_SPEC"
- },
- {
- "ArchStdEvent": "CRYPTO_SPEC"
- },
- {
- "ArchStdEvent": "BR_IMMED_SPEC"
- },
- {
- "ArchStdEvent": "BR_RETURN_SPEC"
- },
- {
- "ArchStdEvent": "BR_INDIRECT_SPEC"
- },
- {
- "ArchStdEvent": "ISB_SPEC"
- },
- {
- "ArchStdEvent": "DSB_SPEC"
- },
- {
- "ArchStdEvent": "DMB_SPEC"
- },
- {
- "ArchStdEvent": "RC_LD_SPEC"
- },
- {
- "ArchStdEvent": "RC_ST_SPEC"
- },
- {
- "ArchStdEvent": "ASE_INST_SPEC"
- },
- {
- "ArchStdEvent": "SVE_INST_SPEC"
- },
- {
- "ArchStdEvent": "FP_HP_SPEC"
- },
- {
- "ArchStdEvent": "FP_SP_SPEC"
- },
- {
- "ArchStdEvent": "FP_DP_SPEC"
- },
- {
- "ArchStdEvent": "SVE_PRED_SPEC"
- },
- {
- "ArchStdEvent": "SVE_PRED_EMPTY_SPEC"
- },
- {
- "ArchStdEvent": "SVE_PRED_FULL_SPEC"
- },
- {
- "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC"
- },
- {
- "ArchStdEvent": "SVE_PRED_NOT_FULL_SPEC"
- },
- {
- "ArchStdEvent": "SVE_LDFF_SPEC"
- },
- {
- "ArchStdEvent": "SVE_LDFF_FAULT_SPEC"
- },
- {
- "ArchStdEvent": "FP_SCALE_OPS_SPEC"
- },
- {
- "ArchStdEvent": "FP_FIXED_OPS_SPEC"
- },
- {
- "ArchStdEvent": "ASE_SVE_INT8_SPEC"
- },
- {
- "ArchStdEvent": "ASE_SVE_INT16_SPEC"
- },
- {
- "ArchStdEvent": "ASE_SVE_INT32_SPEC"
- },
- {
- "ArchStdEvent": "ASE_SVE_INT64_SPEC"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1d_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1d_cache.json
new file mode 100644
index 000000000000..da7c129f2569
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1d_cache.json
@@ -0,0 +1,54 @@
+[
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL",
+ "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed load or store operations that missed in the level 1 data cache. This event only counts one event per cache line. This event does not count cache line allocations from preload instructions or from hardware cache prefetching."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE",
+        "PublicDescription": "Counts level 1 data cache accesses from any load/store operations. Atomic operations that resolve in the CPU's caches (near atomic operations) count as both a write access and read access. Each access to a cache line is counted including the multiple accesses caused by single instructions such as LDM or STM. Each access to other level 1 data or unified memory structures, for example refill buffers, write buffers, and write-back buffers, is also counted."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB",
+ "PublicDescription": "Counts write-backs of dirty data from the L1 data cache to the L2 cache. This occurs when either a dirty cache line is evicted from L1 data cache and allocated in the L2 cache or dirty data is written to the L2 and possibly to the next level of cache. This event counts both victim cache line evictions and cache write-backs from snoops or cache maintenance operations. The following cache operations are not counted:\n\n1. Invalidations which do not result in data being transferred out of the L1 (such as evictions of clean data),\n2. Full line writes which write to L2 without writing L1, such as write streaming mode."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD",
+        "PublicDescription": "Counts cache line refills into the level 1 data cache from any memory read operations that incurred additional latency."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD",
+        "PublicDescription": "Counts level 1 data cache accesses from any load operation. Atomic load operations that resolve in the CPU's caches count as both a write access and read access."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR",
+        "PublicDescription": "Counts level 1 data cache accesses generated by store operations. This event also counts accesses caused by a DC ZVA (data cache zero, specified by virtual address) instruction. Near atomic operations that resolve in the CPU's caches count as a write access and read access."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed load instructions where the memory read operation misses in the level 1 data cache. This event only counts one event per cache line."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+ "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed store instructions where the memory write operation misses in the level 1 data cache. This event only counts one event per cache line."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER",
+ "PublicDescription": "Counts level 1 data cache refills where the cache line data came from caches inside the immediate cluster of the core."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER",
+ "PublicDescription": "Counts level 1 data cache refills for which the cache line data came from outside the immediate cluster of the core, like an SLC in the system interconnect or DRAM."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+ "PublicDescription": "Counts dirty cache line evictions from the level 1 data cache caused by a new cache line allocation. This event does not count evictions caused by cache maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+ "PublicDescription": "Counts write-backs from the level 1 data cache that are a result of a coherency operation made by another CPU. Event count includes cache maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL",
+ "PublicDescription": "Counts each explicit invalidation of a cache line in the level 1 data cache caused by:\n\n- Cache Maintenance Operations (CMO) that operate by a virtual address.\n- Broadcast cache coherency operations from another CPU in the system.\n\nThis event does not count for the following conditions:\n\n1. A cache refill invalidates a cache line.\n2. A CMO which is executed on that CPU and invalidates a cache line specified by set/way.\n\nNote that CMOs that operate by set/way cannot be broadcast from one CPU to another."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1i_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1i_cache.json
new file mode 100644
index 000000000000..633f1030359d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1i_cache.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL",
+ "PublicDescription": "Counts cache line refills in the level 1 instruction cache caused by a missed instruction fetch. Instruction fetches may include accessing multiple instructions, but the single cache line allocation is counted once."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE",
+ "PublicDescription": "Counts instruction fetches which access the level 1 instruction cache. Instruction cache accesses caused by cache maintenance operations are not counted."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS",
+        "PublicDescription": "Counts cache line refills into the level 1 instruction cache that incurred additional latency."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l2_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l2_cache.json
new file mode 100644
index 000000000000..0e31d0daf88b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l2_cache.json
@@ -0,0 +1,50 @@
+[
+ {
+ "ArchStdEvent": "L2D_CACHE",
+        "PublicDescription": "Counts level 2 cache accesses. The level 2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the first level caches or translation resolutions due to accesses. This event also counts write-backs of dirty data from the level 1 data cache to the L2 cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL",
+        "PublicDescription": "Counts cache line refills into the level 2 cache. The level 2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB",
+ "PublicDescription": "Counts write-backs of data from the L2 cache to outside the CPU. This includes snoops to the L2 (from other CPUs) which return data even if the snoops cause an invalidation. L2 cache line invalidations which do not write data outside the CPU and snoops which return data from an L1 cache are not counted. Data would not be written outside the cache when invalidating a clean cache line."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE",
+ "PublicDescription": "TBD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD",
+        "PublicDescription": "Counts level 2 cache accesses due to memory read operations. The level 2 cache is a unified cache for data and instruction accesses; accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR",
+        "PublicDescription": "Counts level 2 cache accesses due to memory write operations. The level 2 cache is a unified cache for data and instruction accesses; accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+        "PublicDescription": "Counts refills for memory accesses due to memory read operations counted by L2D_CACHE_RD. The level 2 cache is a unified cache for data and instruction accesses; accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+        "PublicDescription": "Counts refills for memory accesses due to memory write operations counted by L2D_CACHE_WR. The level 2 cache is a unified cache for data and instruction accesses; accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+ "PublicDescription": "Counts evictions from the level 2 cache because of a line being allocated into the L2 cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN",
+ "PublicDescription": "Counts write-backs from the level 2 cache that are a result of either:\n\n1. Cache maintenance operations,\n\n2. Snoop responses or,\n\n3. Direct cache transfers to another CPU due to a forwarding snoop request."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL",
+ "PublicDescription": "Counts each explicit invalidation of a cache line in the level 2 cache by cache maintenance operations that operate by a virtual address, or by external coherency operations. This event does not count if either:\n\n1. A cache refill invalidates a cache line or,\n2. A Cache Maintenance Operation (CMO), which invalidates a cache line specified by set/way, is executed on that CPU.\n\nCMOs that operate by set/way cannot be broadcast from one CPU to another."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD",
+ "PublicDescription": "Counts cache line refills into the level 2 unified cache from any memory read operations that incurred additional latency."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l3_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l3_cache.json
new file mode 100644
index 000000000000..45bfba532df7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l3_cache.json
@@ -0,0 +1,22 @@
+[
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE",
+ "PublicDescription": "Counts level 3 cache line allocates that do not fetch data from outside the level 3 data or unified cache. For example, allocates due to streaming stores."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL",
+ "PublicDescription": "Counts level 3 accesses that receive data from outside the L3 cache."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE",
+        "PublicDescription": "Counts level 3 cache accesses. The level 3 cache is a unified cache for data and instruction accesses. Accesses are for misses in the lower level caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD",
+ "PublicDescription": "TBD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD",
+ "PublicDescription": "Counts any cache line refill into the level 3 cache from memory read operations that incurred additional latency."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/ll_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/ll_cache.json
new file mode 100644
index 000000000000..bb712d57d58a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/ll_cache.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "LL_CACHE_RD",
+        "PublicDescription": "Counts read transactions that were returned from outside the core cluster. This event counts when the system register CPUECTLR.EXTLLC bit is set. This event counts read transactions returned from outside the core if those transactions either hit in the system level cache or miss in the SLC and are returned from other external sources."
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD",
+ "PublicDescription": "Counts read transactions that were returned from outside the core cluster but missed in the system level cache. This event counts when the system register CPUECTLR.EXTLLC bit is set. This event counts read transactions returned from outside the core if those transactions are missed in the System level Cache. The data source of the transaction is indicated by a field in the CHI transaction returning to the CPU. This event does not count reads caused by cache maintenance operations."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json
index 7b2b21ac150f..106a97f8b2e7 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json
@@ -1,41 +1,46 @@
[
{
- "ArchStdEvent": "MEM_ACCESS"
+ "ArchStdEvent": "MEM_ACCESS",
+        "PublicDescription": "Counts memory accesses issued by the CPU load store unit, where those accesses are issued due to load or store operations. This event counts memory accesses regardless of whether the data is received from any level of the cache hierarchy or from external memory. If memory accesses are broken up into smaller transactions than those specified in the load or store instructions, then the event counts those smaller memory transactions."
},
{
- "ArchStdEvent": "REMOTE_ACCESS"
+ "ArchStdEvent": "MEMORY_ERROR",
+        "PublicDescription": "Counts any detected correctable or uncorrectable physical memory errors (ECC or parity) in the CPU's protected RAMs. On the core, this event counts errors in the caches (including data and tag RAMs). Any detected memory error (from either a speculative and abandoned access, or an architecturally executed access) is counted. Note that errors are only detected when the actual protected memory is accessed by an operation."
},
{
- "ArchStdEvent": "MEM_ACCESS_RD"
+ "ArchStdEvent": "REMOTE_ACCESS",
+ "PublicDescription": "Counts accesses to another chip, which is implemented as a different CMN mesh in the system. If the CHI bus response back to the core indicates that the data source is from another chip (mesh), then the counter is updated. If no data is returned, even if the system snoops another chip/mesh, then the counter is not updated."
},
{
- "ArchStdEvent": "MEM_ACCESS_WR"
+ "ArchStdEvent": "MEM_ACCESS_RD",
+ "PublicDescription": "Counts memory accesses issued by the CPU due to load operations. The event counts any memory load access, no matter whether the data is received from any level of cache hierarchy or external memory. The event also counts atomic load operations. If memory accesses are broken up by the load/store unit into smaller transactions that are issued by the bus interface, then the event counts those smaller transactions."
},
{
- "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ "ArchStdEvent": "MEM_ACCESS_WR",
+ "PublicDescription": "Counts memory accesses issued by the CPU due to store operations. The event counts any memory store access, no matter whether the data is located in any level of cache or external memory. The event also counts atomic load and store operations. If memory accesses are broken up by the load/store unit into smaller transactions that are issued by the bus interface, then the event counts those smaller transactions."
},
{
- "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ "ArchStdEvent": "LDST_ALIGN_LAT",
+        "PublicDescription": "Counts the number of memory read and write accesses in a cycle that incurred additional latency, due to the alignment of the address and the size of data being accessed, which results in the access crossing a cache line boundary."
},
{
- "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ "ArchStdEvent": "LD_ALIGN_LAT",
+        "PublicDescription": "Counts the number of memory read accesses in a cycle that incurred additional latency, due to the alignment of the address and the size of data being accessed, which results in the load crossing a cache line boundary."
},
{
- "ArchStdEvent": "LDST_ALIGN_LAT"
+ "ArchStdEvent": "ST_ALIGN_LAT",
+        "PublicDescription": "Counts the number of memory write accesses in a cycle that incurred additional latency, due to the alignment of the address and the size of data being accessed, which results in the store crossing a cache line boundary."
},
{
- "ArchStdEvent": "LD_ALIGN_LAT"
+ "ArchStdEvent": "MEM_ACCESS_CHECKED",
+ "PublicDescription": "Counts the number of memory read and write accesses in a cycle that are tag checked by the Memory Tagging Extension (MTE)."
},
{
- "ArchStdEvent": "ST_ALIGN_LAT"
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_RD",
+ "PublicDescription": "Counts the number of memory read accesses in a cycle that are tag checked by the Memory Tagging Extension (MTE)."
},
{
- "ArchStdEvent": "MEM_ACCESS_CHECKED"
- },
- {
- "ArchStdEvent": "MEM_ACCESS_CHECKED_RD"
- },
- {
- "ArchStdEvent": "MEM_ACCESS_CHECKED_WR"
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_WR",
+        "PublicDescription": "Counts the number of memory write accesses in a cycle that are tag checked by the Memory Tagging Extension (MTE)."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json
index 8ad15b726dca..5f449270b448 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json
@@ -1,272 +1,303 @@
[
{
- "ArchStdEvent": "FRONTEND_BOUND",
- "MetricExpr": "((stall_slot_frontend) if (#slots - 5) else (stall_slot_frontend - cpu_cycles)) / (#slots * cpu_cycles)"
+ "ArchStdEvent": "backend_bound",
+ "MetricExpr": "(100 * ((STALL_SLOT_BACKEND / (CPU_CYCLES * #slots)) - ((BR_MIS_PRED * 3) / CPU_CYCLES)))"
},
{
- "ArchStdEvent": "BAD_SPECULATION",
- "MetricExpr": "(1 - op_retired / op_spec) * (1 - (stall_slot if (#slots - 5) else (stall_slot - cpu_cycles)) / (#slots * cpu_cycles))"
+ "MetricName": "backend_stalled_cycles",
+ "MetricExpr": "((STALL_BACKEND / CPU_CYCLES) * 100)",
+ "BriefDescription": "This metric is the percentage of cycles that were stalled due to resource constraints in the backend unit of the processor.",
+ "MetricGroup": "Cycle_Accounting",
+ "ScaleUnit": "1percent of cycles"
},
{
- "ArchStdEvent": "RETIRING",
- "MetricExpr": "(op_retired / op_spec) * (1 - (stall_slot if (#slots - 5) else (stall_slot - cpu_cycles)) / (#slots * cpu_cycles))"
+ "ArchStdEvent": "bad_speculation",
+ "MetricExpr": "(100 * (((1 - (OP_RETIRED / OP_SPEC)) * (1 - (((STALL_SLOT) if (strcmp_cpuid_str(0x410fd493) | strcmp_cpuid_str(0x410fd490) ^ 1) else (STALL_SLOT - CPU_CYCLES)) / (CPU_CYCLES * #slots)))) + ((BR_MIS_PRED * 4) / CPU_CYCLES)))"
},
{
- "ArchStdEvent": "BACKEND_BOUND"
+ "MetricName": "branch_misprediction_ratio",
+ "MetricExpr": "(BR_MIS_PRED_RETIRED / BR_RETIRED)",
+ "BriefDescription": "This metric measures the ratio of branches mispredicted to the total number of branches architecturally executed. This gives an indication of the effectiveness of the branch prediction unit.",
+ "MetricGroup": "Miss_Ratio;Branch_Effectiveness",
+ "ScaleUnit": "1per branch"
},
{
- "MetricExpr": "L1D_TLB_REFILL / L1D_TLB",
- "BriefDescription": "The rate of L1D TLB refill to the overall L1D TLB lookups",
- "MetricGroup": "TLB",
- "MetricName": "l1d_tlb_miss_rate",
- "ScaleUnit": "100%"
+ "MetricName": "branch_mpki",
+ "MetricExpr": "((BR_MIS_PRED_RETIRED / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of branch mispredictions per thousand instructions executed.",
+ "MetricGroup": "MPKI;Branch_Effectiveness",
+ "ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "L1I_TLB_REFILL / L1I_TLB",
- "BriefDescription": "The rate of L1I TLB refill to the overall L1I TLB lookups",
- "MetricGroup": "TLB",
- "MetricName": "l1i_tlb_miss_rate",
- "ScaleUnit": "100%"
+ "MetricName": "branch_percentage",
+ "MetricExpr": "(((BR_IMMED_SPEC + BR_INDIRECT_SPEC) / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures branch operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "L2D_TLB_REFILL / L2D_TLB",
- "BriefDescription": "The rate of L2D TLB refill to the overall L2D TLB lookups",
- "MetricGroup": "TLB",
- "MetricName": "l2_tlb_miss_rate",
- "ScaleUnit": "100%"
+ "MetricName": "crypto_percentage",
+ "MetricExpr": "((CRYPTO_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "DTLB_WALK / INST_RETIRED * 1000",
- "BriefDescription": "The rate of TLB Walks per kilo instructions for data accesses",
- "MetricGroup": "TLB",
"MetricName": "dtlb_mpki",
+ "MetricExpr": "((DTLB_WALK / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of data TLB Walks per thousand instructions executed.",
+ "MetricGroup": "MPKI;DTLB_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "DTLB_WALK / L1D_TLB",
- "BriefDescription": "The rate of DTLB Walks to the overall L1D TLB lookups",
- "MetricGroup": "TLB",
- "MetricName": "dtlb_walk_rate",
- "ScaleUnit": "100%"
+ "MetricName": "dtlb_walk_ratio",
+ "MetricExpr": "(DTLB_WALK / L1D_TLB)",
+ "BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.",
+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
},
{
- "MetricExpr": "ITLB_WALK / INST_RETIRED * 1000",
- "BriefDescription": "The rate of TLB Walks per kilo instructions for instruction accesses",
- "MetricGroup": "TLB",
- "MetricName": "itlb_mpki",
- "ScaleUnit": "1MPKI"
+ "ArchStdEvent": "frontend_bound",
+ "MetricExpr": "(100 * ((((STALL_SLOT_FRONTEND) if (strcmp_cpuid_str(0x410fd493) | strcmp_cpuid_str(0x410fd490) ^ 1) else (STALL_SLOT_FRONTEND - CPU_CYCLES)) / (CPU_CYCLES * #slots)) - (BR_MIS_PRED / CPU_CYCLES)))"
},
{
- "MetricExpr": "ITLB_WALK / L1I_TLB",
- "BriefDescription": "The rate of ITLB Walks to the overall L1I TLB lookups",
- "MetricGroup": "TLB",
- "MetricName": "itlb_walk_rate",
- "ScaleUnit": "100%"
+ "MetricName": "frontend_stalled_cycles",
+ "MetricExpr": "((STALL_FRONTEND / CPU_CYCLES) * 100)",
+ "BriefDescription": "This metric is the percentage of cycles that were stalled due to resource constraints in the frontend unit of the processor.",
+ "MetricGroup": "Cycle_Accounting",
+ "ScaleUnit": "1percent of cycles"
},
{
- "MetricExpr": "L1I_CACHE_REFILL / INST_RETIRED * 1000",
- "BriefDescription": "The rate of L1 I-Cache misses per kilo instructions",
- "MetricGroup": "Cache",
- "MetricName": "l1i_cache_mpki",
+ "MetricName": "integer_dp_percentage",
+ "MetricExpr": "((DP_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "ipc",
+ "MetricExpr": "(INST_RETIRED / CPU_CYCLES)",
+ "BriefDescription": "This metric measures the number of instructions retired per cycle.",
+ "MetricGroup": "General",
+ "ScaleUnit": "1per cycle"
+ },
+ {
+ "MetricName": "itlb_mpki",
+ "MetricExpr": "((ITLB_WALK / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of instruction TLB Walks per thousand instructions executed.",
+ "MetricGroup": "MPKI;ITLB_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
- "BriefDescription": "The rate of L1 I-Cache misses to the overall L1 I-Cache",
- "MetricGroup": "Cache",
- "MetricName": "l1i_cache_miss_rate",
- "ScaleUnit": "100%"
+ "MetricName": "itlb_walk_ratio",
+ "MetricExpr": "(ITLB_WALK / L1I_TLB)",
+ "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "l1d_cache_miss_ratio",
+ "MetricExpr": "(L1D_CACHE_REFILL / L1D_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.",
+ "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
},
{
- "MetricExpr": "L1D_CACHE_REFILL / INST_RETIRED * 1000",
- "BriefDescription": "The rate of L1 D-Cache misses per kilo instructions",
- "MetricGroup": "Cache",
"MetricName": "l1d_cache_mpki",
+ "MetricExpr": "((L1D_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 data cache accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;L1D_Cache_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
- "BriefDescription": "The rate of L1 D-Cache misses to the overall L1 D-Cache",
- "MetricGroup": "Cache",
- "MetricName": "l1d_cache_miss_rate",
- "ScaleUnit": "100%"
+ "MetricName": "l1d_tlb_miss_ratio",
+ "MetricExpr": "(L1D_TLB_REFILL / L1D_TLB)",
+ "BriefDescription": "This metric measures the ratio of level 1 data TLB accesses missed to the total number of level 1 data TLB accesses. This gives an indication of the effectiveness of the level 1 data TLB.",
+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
},
{
- "MetricExpr": "L2D_CACHE_REFILL / INST_RETIRED * 1000",
- "BriefDescription": "The rate of L2 D-Cache misses per kilo instructions",
- "MetricGroup": "Cache",
- "MetricName": "l2d_cache_mpki",
+ "MetricName": "l1d_tlb_mpki",
+ "MetricExpr": "((L1D_TLB_REFILL / INST_RETIRED) * 1000)",
+        "BriefDescription": "This metric measures the number of level 1 data TLB accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;DTLB_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
- "BriefDescription": "The rate of L2 D-Cache misses to the overall L2 D-Cache",
- "MetricGroup": "Cache",
- "MetricName": "l2d_cache_miss_rate",
- "ScaleUnit": "100%"
+ "MetricName": "l1i_cache_miss_ratio",
+ "MetricExpr": "(L1I_CACHE_REFILL / L1I_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. This gives an indication of the effectiveness of the level 1 instruction cache.",
+ "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
},
{
- "MetricExpr": "L3D_CACHE_REFILL / INST_RETIRED * 1000",
- "BriefDescription": "The rate of L3 D-Cache misses per kilo instructions",
- "MetricGroup": "Cache",
- "MetricName": "l3d_cache_mpki",
+ "MetricName": "l1i_cache_mpki",
+ "MetricExpr": "((L1I_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 instruction cache accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;L1I_Cache_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "L3D_CACHE_REFILL / L3D_CACHE",
- "BriefDescription": "The rate of L3 D-Cache misses to the overall L3 D-Cache",
- "MetricGroup": "Cache",
- "MetricName": "l3d_cache_miss_rate",
- "ScaleUnit": "100%"
+ "MetricName": "l1i_tlb_miss_ratio",
+ "MetricExpr": "(L1I_TLB_REFILL / L1I_TLB)",
+ "BriefDescription": "This metric measures the ratio of level 1 instruction TLB accesses missed to the total number of level 1 instruction TLB accesses. This gives an indication of the effectiveness of the level 1 instruction TLB.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
},
{
- "MetricExpr": "LL_CACHE_MISS_RD / INST_RETIRED * 1000",
- "BriefDescription": "The rate of LL Cache read misses per kilo instructions",
- "MetricGroup": "Cache",
- "MetricName": "ll_cache_read_mpki",
+ "MetricName": "l1i_tlb_mpki",
+ "MetricExpr": "((L1I_TLB_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 instruction TLB accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;ITLB_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "LL_CACHE_MISS_RD / LL_CACHE_RD",
- "BriefDescription": "The rate of LL Cache read misses to the overall LL Cache read",
- "MetricGroup": "Cache",
- "MetricName": "ll_cache_read_miss_rate",
- "ScaleUnit": "100%"
+ "MetricName": "l2_cache_miss_ratio",
+ "MetricExpr": "(L2D_CACHE_REFILL / L2D_CACHE)",
+        "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instructions. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+ "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
},
{
- "MetricExpr": "(LL_CACHE_RD - LL_CACHE_MISS_RD) / LL_CACHE_RD",
- "BriefDescription": "The rate of LL Cache read hit to the overall LL Cache read",
- "MetricGroup": "Cache",
- "MetricName": "ll_cache_read_hit_rate",
- "ScaleUnit": "100%"
+ "MetricName": "l2_cache_mpki",
+ "MetricExpr": "((L2D_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 2 unified cache accesses missed per thousand instructions executed. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+ "MetricGroup": "MPKI;L2_Cache_Effectiveness",
+ "ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "BR_MIS_PRED_RETIRED / INST_RETIRED * 1000",
- "BriefDescription": "The rate of branches mis-predicted per kilo instructions",
- "MetricGroup": "Branch",
- "MetricName": "branch_mpki",
+ "MetricName": "l2_tlb_miss_ratio",
+ "MetricExpr": "(L2D_TLB_REFILL / L2D_TLB)",
+ "BriefDescription": "This metric measures the ratio of level 2 unified TLB accesses missed to the total number of level 2 unified TLB accesses. This gives an indication of the effectiveness of the level 2 TLB.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "l2_tlb_mpki",
+ "MetricExpr": "((L2D_TLB_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 2 unified TLB accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;ITLB_Effectiveness;DTLB_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "BR_RETIRED / INST_RETIRED * 1000",
- "BriefDescription": "The rate of branches retired per kilo instructions",
- "MetricGroup": "Branch",
- "MetricName": "branch_pki",
- "ScaleUnit": "1PKI"
+ "MetricName": "ll_cache_read_hit_ratio",
+ "MetricExpr": "((LL_CACHE_RD - LL_CACHE_MISS_RD) / LL_CACHE_RD)",
+ "BriefDescription": "This metric measures the ratio of last level cache read accesses hit in the cache to the total number of last level cache accesses. This gives an indication of the effectiveness of the last level cache for read traffic. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a system level cache.",
+ "MetricGroup": "LL_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
},
{
- "MetricExpr": "BR_MIS_PRED_RETIRED / BR_RETIRED",
- "BriefDescription": "The rate of branches mis-predited to the overall branches",
- "MetricGroup": "Branch",
- "MetricName": "branch_miss_pred_rate",
- "ScaleUnit": "100%"
+ "MetricName": "ll_cache_read_miss_ratio",
+ "MetricExpr": "(LL_CACHE_MISS_RD / LL_CACHE_RD)",
+ "BriefDescription": "This metric measures the ratio of last level cache read accesses missed to the total number of last level cache accesses. This gives an indication of the effectiveness of the last level cache for read traffic. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a system level cache.",
+ "MetricGroup": "Miss_Ratio;LL_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
},
{
- "MetricExpr": "instructions / CPU_CYCLES",
- "BriefDescription": "The average number of instructions executed for each cycle.",
- "MetricGroup": "PEutilization",
- "MetricName": "ipc"
+ "MetricName": "ll_cache_read_mpki",
+ "MetricExpr": "((LL_CACHE_MISS_RD / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of last level cache read accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;LL_Cache_Effectiveness",
+ "ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "ipc / 5",
- "BriefDescription": "IPC percentage of peak. The peak of IPC is 5.",
- "MetricGroup": "PEutilization",
- "MetricName": "ipc_rate",
- "ScaleUnit": "100%"
+ "MetricName": "load_percentage",
+ "MetricExpr": "((LD_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "INST_RETIRED / CPU_CYCLES",
- "BriefDescription": "Architecturally executed Instructions Per Cycle (IPC)",
- "MetricGroup": "PEutilization",
- "MetricName": "retired_ipc"
+ "ArchStdEvent": "retiring",
+ "MetricExpr": "(100 * ((OP_RETIRED / OP_SPEC) * (1 - (((STALL_SLOT) if (strcmp_cpuid_str(0x410fd493) | strcmp_cpuid_str(0x410fd490) ^ 1) else (STALL_SLOT - CPU_CYCLES)) / (CPU_CYCLES * #slots)))))"
},
{
- "MetricExpr": "INST_SPEC / CPU_CYCLES",
- "BriefDescription": "Speculatively executed Instructions Per Cycle (IPC)",
- "MetricGroup": "PEutilization",
- "MetricName": "spec_ipc"
+ "MetricName": "scalar_fp_percentage",
+ "MetricExpr": "((VFP_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "OP_RETIRED / OP_SPEC",
- "BriefDescription": "Of all the micro-operations issued, what percentage are retired(committed)",
- "MetricGroup": "PEutilization",
- "MetricName": "retired_rate",
- "ScaleUnit": "100%"
+ "MetricName": "simd_percentage",
+ "MetricExpr": "((ASE_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "1 - OP_RETIRED / OP_SPEC",
- "BriefDescription": "Of all the micro-operations issued, what percentage are not retired(committed)",
- "MetricGroup": "PEutilization",
- "MetricName": "wasted_rate",
- "ScaleUnit": "100%"
+ "MetricName": "store_percentage",
+ "MetricExpr": "((ST_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
},
{
- "MetricExpr": "OP_RETIRED / OP_SPEC * (1 - (STALL_SLOT if (#slots - 5) else (STALL_SLOT - CPU_CYCLES)) / (#slots * CPU_CYCLES))",
- "BriefDescription": "The truly effective ratio of micro-operations executed by the CPU, which means that misprediction and stall are not included",
- "MetricGroup": "PEutilization",
- "MetricName": "cpu_utilization",
- "ScaleUnit": "100%"
+ "MetricExpr": "L3D_CACHE_REFILL / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of L3 D-Cache misses per kilo instructions",
+ "MetricGroup": "MPKI;L3_Cache_Effectiveness",
+ "MetricName": "l3d_cache_mpki",
+ "ScaleUnit": "1MPKI"
},
{
- "MetricExpr": "LD_SPEC / INST_SPEC",
- "BriefDescription": "The rate of load instructions speculatively executed to overall instructions speclatively executed",
- "MetricGroup": "InstructionMix",
- "MetricName": "load_spec_rate",
+ "MetricExpr": "L3D_CACHE_REFILL / L3D_CACHE",
+ "BriefDescription": "The rate of L3 D-Cache misses to the overall L3 D-Cache",
+ "MetricGroup": "Miss_Ratio;L3_Cache_Effectiveness",
+ "MetricName": "l3d_cache_miss_rate",
"ScaleUnit": "100%"
},
{
- "MetricExpr": "ST_SPEC / INST_SPEC",
- "BriefDescription": "The rate of store instructions speculatively executed to overall instructions speclatively executed",
- "MetricGroup": "InstructionMix",
- "MetricName": "store_spec_rate",
- "ScaleUnit": "100%"
+ "MetricExpr": "BR_RETIRED / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of branches retired per kilo instructions",
+ "MetricGroup": "MPKI;Branch_Effectiveness",
+ "MetricName": "branch_pki",
+ "ScaleUnit": "1PKI"
},
{
- "MetricExpr": "DP_SPEC / INST_SPEC",
- "BriefDescription": "The rate of integer data-processing instructions speculatively executed to overall instructions speclatively executed",
- "MetricGroup": "InstructionMix",
- "MetricName": "data_process_spec_rate",
+ "MetricExpr": "ipc / #slots",
+ "BriefDescription": "IPC percentage of peak. The peak of IPC is the number of slots.",
+ "MetricGroup": "General",
+ "MetricName": "ipc_rate",
"ScaleUnit": "100%"
},
{
- "MetricExpr": "ASE_SPEC / INST_SPEC",
- "BriefDescription": "The rate of advanced SIMD instructions speculatively executed to overall instructions speclatively executed",
- "MetricGroup": "InstructionMix",
- "MetricName": "advanced_simd_spec_rate",
- "ScaleUnit": "100%"
+ "MetricExpr": "INST_SPEC / CPU_CYCLES",
+ "BriefDescription": "Speculatively executed Instructions Per Cycle (IPC)",
+ "MetricGroup": "General",
+ "MetricName": "spec_ipc"
},
{
- "MetricExpr": "VFP_SPEC / INST_SPEC",
- "BriefDescription": "The rate of floating point instructions speculatively executed to overall instructions speclatively executed",
- "MetricGroup": "InstructionMix",
- "MetricName": "float_point_spec_rate",
+ "MetricExpr": "OP_RETIRED / OP_SPEC",
+        "BriefDescription": "Of all the micro-operations issued, what percentage are retired (committed)",
+ "MetricGroup": "General",
+ "MetricName": "retired_rate",
"ScaleUnit": "100%"
},
{
- "MetricExpr": "CRYPTO_SPEC / INST_SPEC",
- "BriefDescription": "The rate of crypto instructions speculatively executed to overall instructions speclatively executed",
- "MetricGroup": "InstructionMix",
- "MetricName": "crypto_spec_rate",
+ "MetricExpr": "1 - OP_RETIRED / OP_SPEC",
+        "BriefDescription": "Of all the micro-operations issued, what percentage are not retired (committed)",
+ "MetricGroup": "General",
+ "MetricName": "wasted_rate",
"ScaleUnit": "100%"
},
{
"MetricExpr": "BR_IMMED_SPEC / INST_SPEC",
- "BriefDescription": "The rate of branch immediate instructions speculatively executed to overall instructions speclatively executed",
- "MetricGroup": "InstructionMix",
+ "BriefDescription": "The rate of branch immediate instructions speculatively executed to overall instructions speculatively executed",
+ "MetricGroup": "Operation_Mix",
"MetricName": "branch_immed_spec_rate",
"ScaleUnit": "100%"
},
{
"MetricExpr": "BR_RETURN_SPEC / INST_SPEC",
- "BriefDescription": "The rate of procedure return instructions speculatively executed to overall instructions speclatively executed",
- "MetricGroup": "InstructionMix",
+ "BriefDescription": "The rate of procedure return instructions speculatively executed to overall instructions speculatively executed",
+ "MetricGroup": "Operation_Mix",
"MetricName": "branch_return_spec_rate",
"ScaleUnit": "100%"
},
{
"MetricExpr": "BR_INDIRECT_SPEC / INST_SPEC",
- "BriefDescription": "The rate of indirect branch instructions speculatively executed to overall instructions speclatively executed",
- "MetricGroup": "InstructionMix",
+ "BriefDescription": "The rate of indirect branch instructions speculatively executed to overall instructions speculatively executed",
+ "MetricGroup": "Operation_Mix",
"MetricName": "branch_indirect_spec_rate",
"ScaleUnit": "100%"
}
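
To make the renamed metrics above concrete, here is a small illustrative sketch; it is not part of the patch and is not how perf evaluates MetricExpr. The event and metric names mirror the JSON above, while the helper function and the sample counter values are made up.

# Illustrative sketch only: recompute a few of the derived metrics defined
# above from raw event counts. Sample numbers are invented.
def derived_metrics(c):
    """Return a dict of example metrics from a dict of raw event counts."""
    return {
        # General
        "ipc": c["INST_RETIRED"] / c["CPU_CYCLES"],
        # Miss_Ratio / Branch_Effectiveness
        "branch_misprediction_ratio": c["BR_MIS_PRED_RETIRED"] / c["BR_RETIRED"],
        # MPKI metrics are misses per thousand retired instructions
        "branch_mpki": c["BR_MIS_PRED_RETIRED"] / c["INST_RETIRED"] * 1000,
        "l1d_cache_mpki": c["L1D_CACHE_REFILL"] / c["INST_RETIRED"] * 1000,
        "l1d_cache_miss_ratio": c["L1D_CACHE_REFILL"] / c["L1D_CACHE"],
        "dtlb_walk_ratio": c["DTLB_WALK"] / c["L1D_TLB"],
    }

if __name__ == "__main__":
    counts = {  # made-up sample values
        "INST_RETIRED": 1_000_000, "CPU_CYCLES": 800_000,
        "BR_RETIRED": 200_000, "BR_MIS_PRED_RETIRED": 4_000,
        "L1D_CACHE": 400_000, "L1D_CACHE_REFILL": 12_000,
        "L1D_TLB": 390_000, "DTLB_WALK": 1_500,
    }
    for name, value in derived_metrics(counts).items():
        print(f"{name}: {value:.4f}")
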
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/pipeline.json
deleted file mode 100644
index f9fae15f7555..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/pipeline.json
+++ /dev/null
@@ -1,23 +0,0 @@
-[
- {
- "ArchStdEvent": "STALL_FRONTEND"
- },
- {
- "ArchStdEvent": "STALL_BACKEND"
- },
- {
- "ArchStdEvent": "STALL"
- },
- {
- "ArchStdEvent": "STALL_SLOT_BACKEND"
- },
- {
- "ArchStdEvent": "STALL_SLOT_FRONTEND"
- },
- {
- "ArchStdEvent": "STALL_SLOT"
- },
- {
- "ArchStdEvent": "STALL_BACKEND_MEM"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/retired.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/retired.json
new file mode 100644
index 000000000000..f297b049b62f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/retired.json
@@ -0,0 +1,30 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR",
+ "PublicDescription": "Counts software writes to the PMSWINC_EL0 (software PMU increment) register. The PMSWINC_EL0 register is a manually updated counter for use by application software.\n\nThis event could be used to measure any user program event, such as accesses to a particular data structure (by writing to the PMSWINC_EL0 register each time the data structure is accessed).\n\nTo use the PMSWINC_EL0 register and event, developers must insert instructions that write to the PMSWINC_EL0 register into the source code.\n\nSince the SW_INCR event records writes to the PMSWINC_EL0 register, there is no need to do a read/increment/write sequence to the PMSWINC_EL0 register."
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED",
+ "PublicDescription": "Counts instructions that have been architecturally executed."
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED",
+        "PublicDescription": "Counts architecturally executed writes to the CONTEXTIDR register, which usually contains the kernel PID and can be output with hardware trace."
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED",
+ "PublicDescription": "Counts architectural writes to TTBR0/1_EL1. If virtualization host extensions are enabled (by setting the HCR_EL2.E2H bit to 1), then accesses to TTBR0/1_EL1 that are redirected to TTBR0/1_EL2, or accesses to TTBR0/1_EL12, are counted. TTBRn registers are typically updated when the kernel is swapping user-space threads or applications."
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED",
+ "PublicDescription": "Counts architecturally executed branches, whether the branch is taken or not. Instructions that explicitly write to the PC are also counted."
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED",
+ "PublicDescription": "Counts branches counted by BR_RETIRED which were mispredicted and caused a pipeline flush."
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED",
+ "PublicDescription": "Counts micro-operations that are architecturally executed. This is a count of number of micro-operations retired from the commit queue in a single cycle."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spe.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spe.json
index 20f2165c85fe..5de8b0f3a440 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spe.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spe.json
@@ -1,14 +1,18 @@
[
{
- "ArchStdEvent": "SAMPLE_POP"
+ "ArchStdEvent": "SAMPLE_POP",
+ "PublicDescription": "Counts statistical profiling sample population, the count of all operations that could be sampled but may or may not be chosen for sampling."
},
{
- "ArchStdEvent": "SAMPLE_FEED"
+ "ArchStdEvent": "SAMPLE_FEED",
+ "PublicDescription": "Counts statistical profiling samples taken for sampling."
},
{
- "ArchStdEvent": "SAMPLE_FILTRATE"
+ "ArchStdEvent": "SAMPLE_FILTRATE",
+ "PublicDescription": "Counts statistical profiling samples taken which are not removed by filtering."
},
{
- "ArchStdEvent": "SAMPLE_COLLISION"
+ "ArchStdEvent": "SAMPLE_COLLISION",
+        "PublicDescription": "Counts statistical profiling samples that have collided with a previous sample and were therefore not taken."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spec_operation.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spec_operation.json
new file mode 100644
index 000000000000..1af961f8a6c8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spec_operation.json
@@ -0,0 +1,110 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED",
+ "PublicDescription": "Counts branches which are speculatively executed and mispredicted."
+ },
+ {
+ "ArchStdEvent": "BR_PRED",
+        "PublicDescription": "Counts branches that were speculatively executed and correctly predicted."
+ },
+ {
+ "ArchStdEvent": "INST_SPEC",
+ "PublicDescription": "Counts operations that have been speculatively executed."
+ },
+ {
+ "ArchStdEvent": "OP_SPEC",
+ "PublicDescription": "Counts micro-operations speculatively executed. This is the count of the number of micro-operations dispatched in a cycle."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ "PublicDescription": "Counts unaligned memory read operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses. The event does not count preload operations (PLD, PLI)."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ "PublicDescription": "Counts unaligned memory write operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ "PublicDescription": "Counts unaligned memory operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses."
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC",
+ "PublicDescription": "Counts Load-Exclusive operations that have been speculatively executed. Eg: LDREX, LDX"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC",
+ "PublicDescription": "Counts store-exclusive operations that have been speculatively executed and have successfully completed the store operation."
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC",
+ "PublicDescription": "Counts store-exclusive operations that have been speculatively executed and have not successfully completed the store operation."
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC",
+ "PublicDescription": "Counts store-exclusive operations that have been speculatively executed."
+ },
+ {
+ "ArchStdEvent": "LD_SPEC",
+ "PublicDescription": "Counts speculatively executed load operations including Single Instruction Multiple Data (SIMD) load operations."
+ },
+ {
+ "ArchStdEvent": "ST_SPEC",
+ "PublicDescription": "Counts speculatively executed store operations including Single Instruction Multiple Data (SIMD) store operations."
+ },
+ {
+ "ArchStdEvent": "DP_SPEC",
+ "PublicDescription": "Counts speculatively executed logical or arithmetic instructions such as MOV/MVN operations."
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC",
+ "PublicDescription": "Counts speculatively executed Advanced SIMD operations excluding load, store and move micro-operations that move data to or from SIMD (vector) registers."
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC",
+ "PublicDescription": "Counts speculatively executed floating point operations. This event does not count operations that move data to or from floating point (vector) registers."
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC",
+ "PublicDescription": "Counts speculatively executed operations which cause software changes of the PC. Those operations include all taken branch operations."
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC",
+ "PublicDescription": "Counts speculatively executed cryptographic operations except for PMULL and VMULL operations."
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC",
+ "PublicDescription": "Counts immediate branch operations which are speculatively executed."
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC",
+ "PublicDescription": "Counts procedure return operations (RET) which are speculatively executed."
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC",
+ "PublicDescription": "Counts indirect branch operations including procedure returns, which are speculatively executed. This includes operations that force a software change of the PC, other than exception-generating operations. Eg: BR Xn, RET"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC",
+ "PublicDescription": "Counts ISB operations that are executed."
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC",
+ "PublicDescription": "Counts DSB operations that are speculatively issued to Load/Store unit in the CPU."
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC",
+ "PublicDescription": "Counts DMB operations that are speculatively issued to the Load/Store unit in the CPU. This event does not count implied barriers from load acquire/store release operations."
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC",
+ "PublicDescription": "Counts any load acquire operations that are speculatively executed. Eg: LDAR, LDARH, LDARB"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC",
+        "PublicDescription": "Counts any store release operations that are speculatively executed. Eg: STLR, STLRH, STLRB"
+ },
+ {
+ "ArchStdEvent": "ASE_INST_SPEC",
+ "PublicDescription": "Counts speculatively executed Advanced SIMD operations."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/stall.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/stall.json
new file mode 100644
index 000000000000..bbbebc805034
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/stall.json
@@ -0,0 +1,30 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND",
+ "PublicDescription": "Counts cycles when frontend could not send any micro-operations to the rename stage because of frontend resource stalls caused by fetch memory latency or branch prediction flow stalls. All the frontend slots were empty during the cycle when this event counts."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND",
+ "PublicDescription": "Counts cycles whenever the rename unit is unable to send any micro-operations to the backend of the pipeline because of backend resource constraints. Backend resource constraints can include issue stage fullness, execution stage fullness, or other internal pipeline resource fullness. All the backend slots were empty during the cycle when this event counts."
+ },
+ {
+ "ArchStdEvent": "STALL",
+ "PublicDescription": "Counts cycles when no operations are sent to the rename unit from the frontend or from the rename unit to the backend for any reason (either frontend or backend stall)."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND",
+ "PublicDescription": "Counts slots per cycle in which no operations are sent from the rename unit to the backend due to backend resource constraints."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND",
+ "PublicDescription": "Counts slots per cycle in which no operations are sent to the rename unit from the frontend due to frontend resource constraints."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT",
+ "PublicDescription": "Counts slots per cycle in which no operations are sent to the rename unit from the frontend or from the rename unit to the backend for any reason (either frontend or backend stall)."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM",
+ "PublicDescription": "Counts cycles when the backend is stalled because there is a pending demand load request in progress in the last level core cache."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/sve.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/sve.json
new file mode 100644
index 000000000000..51dab48cb2ba
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/sve.json
@@ -0,0 +1,50 @@
+[
+ {
+ "ArchStdEvent": "SVE_INST_SPEC",
+ "PublicDescription": "Counts speculatively executed operations that are SVE operations."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC",
+ "PublicDescription": "Counts speculatively executed predicated SVE operations."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_EMPTY_SPEC",
+ "PublicDescription": "Counts speculatively executed predicated SVE operations with no active predicate elements."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_FULL_SPEC",
+ "PublicDescription": "Counts speculatively executed predicated SVE operations with all predicate elements active."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC",
+ "PublicDescription": "Counts speculatively executed predicated SVE operations with at least one but not all active predicate elements."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_NOT_FULL_SPEC",
+ "PublicDescription": "Counts speculatively executed predicated SVE operations with at least one non active predicate elements."
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC",
+ "PublicDescription": "Counts speculatively executed SVE first fault or non-fault load operations."
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_FAULT_SPEC",
+ "PublicDescription": "Counts speculatively executed SVE first fault or non-fault load operations that clear at least one bit in the FFR."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT8_SPEC",
+ "PublicDescription": "Counts speculatively executed Advanced SIMD or SVE integer operations with the largest data type an 8-bit integer."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT16_SPEC",
+ "PublicDescription": "Counts speculatively executed Advanced SIMD or SVE integer operations with the largest data type a 16-bit integer."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT32_SPEC",
+ "PublicDescription": "Counts speculatively executed Advanced SIMD or SVE integer operations with the largest data type a 32-bit integer."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT64_SPEC",
+ "PublicDescription": "Counts speculatively executed Advanced SIMD or SVE integer operations with the largest data type a 64-bit integer."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/tlb.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/tlb.json
new file mode 100644
index 000000000000..b550af1831f5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/tlb.json
@@ -0,0 +1,66 @@
+[
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL",
+ "PublicDescription": "Counts level 1 instruction TLB refills from any Instruction fetch. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL",
+ "PublicDescription": "Counts level 1 data TLB accesses that resulted in TLB refills. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count on an access from an AT(address translation) instruction."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB",
+ "PublicDescription": "Counts level 1 data TLB accesses caused by any memory load or store operation. Note that load or store instructions can be broken up into multiple memory operations. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1I_TLB",
+ "PublicDescription": "Counts level 1 instruction TLB accesses, whether the access hits or misses in the TLB. This event counts both demand accesses and prefetch or preload generated accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL",
+ "PublicDescription": "Counts level 2 TLB refills caused by memory operations from both data and instruction fetch, except for those caused by TLB maintenance operations and hardware prefetches."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB",
+ "PublicDescription": "Counts level 2 TLB accesses except those caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK",
+ "PublicDescription": "Counts data memory translation table walks caused by a miss in the L2 TLB driven by a memory access. Note that partial translations that also cause a table walk are counted. This event does not count table walks caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK",
+ "PublicDescription": "Counts instruction memory translation table walks caused by a miss in the L2 TLB driven by a memory access. Partial translations that also cause a table walk are counted. This event does not count table walks caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD",
+ "PublicDescription": "Counts level 1 data TLB refills caused by memory read operations. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count on an access from an Address Translation (AT) instruction."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR",
+ "PublicDescription": "Counts level 1 data TLB refills caused by data side memory write operations. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count with an access from an Address Translation (AT) instruction."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD",
+ "PublicDescription": "Counts level 1 data TLB accesses caused by memory read operations. This event counts whether the access hits or misses in the TLB. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR",
+ "PublicDescription": "Counts any L1 data side TLB accesses caused by memory write operations. This event counts whether the access hits or misses in the TLB. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD",
+ "PublicDescription": "Counts level 2 TLB refills caused by memory read operations from both data and instruction fetch except for those caused by TLB maintenance operations or hardware prefetches."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR",
+ "PublicDescription": "Counts level 2 TLB refills caused by memory write operations from both data and instruction fetch except for those caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD",
+ "PublicDescription": "Counts level 2 TLB accesses caused by memory read operations from both data and instruction fetch except for those caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR",
+ "PublicDescription": "Counts level 2 TLB accesses caused by memory write operations from both data and instruction fetch except for those caused by TLB maintenance operations."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/trace.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/trace.json
index 3116135c59e2..98f6fabfebc7 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/trace.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/trace.json
@@ -1,29 +1,38 @@
[
{
- "ArchStdEvent": "TRB_WRAP"
+ "ArchStdEvent": "TRB_WRAP",
+ "PublicDescription": "This event is generated each time the current write pointer is wrapped to the base pointer."
},
{
- "ArchStdEvent": "TRCEXTOUT0"
+ "ArchStdEvent": "TRCEXTOUT0",
+ "PublicDescription": "This event is generated each time an event is signaled by ETE external event 0."
},
{
- "ArchStdEvent": "TRCEXTOUT1"
+ "ArchStdEvent": "TRCEXTOUT1",
+ "PublicDescription": "This event is generated each time an event is signaled by ETE external event 1."
},
{
- "ArchStdEvent": "TRCEXTOUT2"
+ "ArchStdEvent": "TRCEXTOUT2",
+ "PublicDescription": "This event is generated each time an event is signaled by ETE external event 2."
},
{
- "ArchStdEvent": "TRCEXTOUT3"
+ "ArchStdEvent": "TRCEXTOUT3",
+ "PublicDescription": "This event is generated each time an event is signaled by ETE external event 3."
},
{
- "ArchStdEvent": "CTI_TRIGOUT4"
+ "ArchStdEvent": "CTI_TRIGOUT4",
+ "PublicDescription": "This event is generated each time an event is signaled on CTI output trigger 4."
},
{
- "ArchStdEvent": "CTI_TRIGOUT5"
+ "ArchStdEvent": "CTI_TRIGOUT5",
+ "PublicDescription": "This event is generated each time an event is signaled on CTI output trigger 5."
},
{
- "ArchStdEvent": "CTI_TRIGOUT6"
+ "ArchStdEvent": "CTI_TRIGOUT6",
+ "PublicDescription": "This event is generated each time an event is signaled on CTI output trigger 6."
},
{
- "ArchStdEvent": "CTI_TRIGOUT7"
+ "ArchStdEvent": "CTI_TRIGOUT7",
+ "PublicDescription": "This event is generated each time an event is signaled on CTI output trigger 7."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/ali_drw.json b/tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/ali_drw.json
new file mode 100644
index 000000000000..e21c469a8ef0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/ali_drw.json
@@ -0,0 +1,373 @@
+[
+ {
+ "BriefDescription": "A Write or Read Op at HIF interface. The unit is 64B.",
+ "ConfigCode": "0x0",
+ "EventName": "hif_rd_or_wr",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Write Op at HIF interface. The unit is 64B.",
+ "ConfigCode": "0x1",
+ "EventName": "hif_wr",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Read Op at HIF interface. The unit is 64B.",
+ "ConfigCode": "0x2",
+ "EventName": "hif_rd",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Read-Modify-Write Op at HIF interface. The unit is 64B.",
+ "ConfigCode": "0x3",
+ "EventName": "hif_rmw",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A high priority Read at HIF interface. The unit is 64B.",
+ "ConfigCode": "0x4",
+ "EventName": "hif_hi_pri_rd",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A write data cycle at DFI interface (to DRAM).",
+ "ConfigCode": "0x7",
+ "EventName": "dfi_wr_data_cycles",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A read data cycle at DFI interface (to DRAM).",
+ "ConfigCode": "0x8",
+ "EventName": "dfi_rd_data_cycles",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A high priority read becomes critical.",
+ "ConfigCode": "0x9",
+ "EventName": "hpr_xact_when_critical",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A low priority read becomes critical.",
+ "ConfigCode": "0xA",
+ "EventName": "lpr_xact_when_critical",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A write becomes critical.",
+ "ConfigCode": "0xB",
+ "EventName": "wr_xact_when_critical",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "An Activate(ACT) command to DRAM.",
+ "ConfigCode": "0xC",
+ "EventName": "op_is_activate",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Read or Write CAS command to DRAM.",
+ "ConfigCode": "0xD",
+ "EventName": "op_is_rd_or_wr",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "An Activate(ACT) command for read to DRAM.",
+ "ConfigCode": "0xE",
+ "EventName": "op_is_rd_activate",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Read CAS command to DRAM.",
+ "ConfigCode": "0xF",
+ "EventName": "op_is_rd",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Write CAS command to DRAM.",
+ "ConfigCode": "0x10",
+ "EventName": "op_is_wr",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Masked Write command to DRAM.",
+ "ConfigCode": "0x11",
+ "EventName": "op_is_mwr",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Precharge(PRE) command to DRAM.",
+ "ConfigCode": "0x12",
+ "EventName": "op_is_precharge",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Precharge(PRE) required by read or write.",
+ "ConfigCode": "0x13",
+ "EventName": "precharge_for_rdwr",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Precharge(PRE) required by other conditions.",
+ "ConfigCode": "0x14",
+ "EventName": "precharge_for_other",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A read-write turnaround.",
+ "ConfigCode": "0x15",
+ "EventName": "rdwr_transitions",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A write combine(merge) in write data buffer.",
+ "ConfigCode": "0x16",
+ "EventName": "write_combine",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Write-After-Read hazard.",
+ "ConfigCode": "0x17",
+ "EventName": "war_hazard",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Read-After-Write hazard.",
+ "ConfigCode": "0x18",
+ "EventName": "raw_hazard",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Write-After-Write hazard.",
+ "ConfigCode": "0x19",
+ "EventName": "waw_hazard",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "Rank0 enters self-refresh(SRE).",
+ "ConfigCode": "0x1A",
+ "EventName": "op_is_enter_selfref_rk0",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "Rank1 enters self-refresh(SRE).",
+ "ConfigCode": "0x1B",
+ "EventName": "op_is_enter_selfref_rk1",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "Rank2 enters self-refresh(SRE).",
+ "ConfigCode": "0x1C",
+ "EventName": "op_is_enter_selfref_rk2",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "Rank3 enters self-refresh(SRE).",
+ "ConfigCode": "0x1D",
+ "EventName": "op_is_enter_selfref_rk3",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "Rank0 enters power-down(PDE).",
+ "ConfigCode": "0x1E",
+ "EventName": "op_is_enter_powerdown_rk0",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "Rank1 enters power-down(PDE).",
+ "ConfigCode": "0x1F",
+ "EventName": "op_is_enter_powerdown_rk1",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "Rank2 enters power-down(PDE).",
+ "ConfigCode": "0x20",
+ "EventName": "op_is_enter_powerdown_rk2",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "Rank3 enters power-down(PDE).",
+ "ConfigCode": "0x21",
+ "EventName": "op_is_enter_powerdown_rk3",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A cycle that Rank0 stays in self-refresh mode.",
+ "ConfigCode": "0x26",
+ "EventName": "selfref_mode_rk0",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A cycle that Rank1 stays in self-refresh mode.",
+ "ConfigCode": "0x27",
+ "EventName": "selfref_mode_rk1",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A cycle that Rank2 stays in self-refresh mode.",
+ "ConfigCode": "0x28",
+ "EventName": "selfref_mode_rk2",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A cycle that Rank3 stays in self-refresh mode.",
+ "ConfigCode": "0x29",
+ "EventName": "selfref_mode_rk3",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "An auto-refresh(REF) command to DRAM.",
+ "ConfigCode": "0x2A",
+ "EventName": "op_is_refresh",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A critical auto-refresh(REF) command to DRAM.",
+ "ConfigCode": "0x2B",
+ "EventName": "op_is_crit_ref",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "An MRR or MRW command to DRAM.",
+ "ConfigCode": "0x2D",
+ "EventName": "op_is_load_mode",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A ZQCal command to DRAM.",
+ "ConfigCode": "0x2E",
+ "EventName": "op_is_zqcl",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "At least one entry in read queue reaches the visible window limit.",
+ "ConfigCode": "0x30",
+ "EventName": "visible_window_limit_reached_rd",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "At least one entry in write queue reaches the visible window limit.",
+ "ConfigCode": "0x31",
+ "EventName": "visible_window_limit_reached_wr",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A DQS Oscillator MPC command to DRAM.",
+ "ConfigCode": "0x34",
+ "EventName": "op_is_dqsosc_mpc",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A DQS Oscillator MRR command to DRAM.",
+ "ConfigCode": "0x35",
+ "EventName": "op_is_dqsosc_mrr",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A Temperature Compensated Refresh(TCR) MRR command to DRAM.",
+ "ConfigCode": "0x36",
+ "EventName": "op_is_tcr_mrr",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A ZQCal Start command to DRAM.",
+ "ConfigCode": "0x37",
+ "EventName": "op_is_zqstart",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A ZQCal Latch command to DRAM.",
+ "ConfigCode": "0x38",
+ "EventName": "op_is_zqlatch",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A packet at CHI TXREQ interface (request).",
+ "ConfigCode": "0x39",
+ "EventName": "chi_txreq",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A packet at CHI TXDAT interface (read data).",
+ "ConfigCode": "0x3A",
+ "EventName": "chi_txdat",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A packet at CHI RXDAT interface (write data).",
+ "ConfigCode": "0x3B",
+ "EventName": "chi_rxdat",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A packet at CHI RXRSP interface.",
+ "ConfigCode": "0x3C",
+ "EventName": "chi_rxrsp",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "A violation detected in TZC.",
+ "ConfigCode": "0x3D",
+ "EventName": "tsz_vio",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "BriefDescription": "The ddr cycles.",
+ "ConfigCode": "0x80",
+ "EventName": "ddr_cycles",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/metrics.json b/tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/metrics.json
new file mode 100644
index 000000000000..bc865b374b6a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/metrics.json
@@ -0,0 +1,20 @@
+[
+ {
+ "MetricName": "ddr_read_bandwidth.all",
+ "BriefDescription": "The ddr read bandwidth(MB/s).",
+ "MetricGroup": "ali_drw",
+ "MetricExpr": "hif_rd * 64 / 1e6 / duration_time",
+ "ScaleUnit": "1MB/s",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ },
+ {
+ "MetricName": "ddr_write_bandwidth.all",
+ "BriefDescription": "The ddr write bandwidth(MB/s).",
+ "MetricGroup": "ali_drw",
+ "MetricExpr": "(hif_wr + hif_rmw) * 64 / 1e6 / duration_time",
+ "ScaleUnit": "1MB/s",
+ "Unit": "ali_drw",
+ "Compat": "ali_drw_pmu"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/sbsa.json b/tools/perf/pmu-events/arch/arm64/sbsa.json
index f90b338261ac..4eed79a28f6e 100644
--- a/tools/perf/pmu-events/arch/arm64/sbsa.json
+++ b/tools/perf/pmu-events/arch/arm64/sbsa.json
@@ -1,34 +1,34 @@
[
{
- "MetricExpr": "stall_slot_frontend / (#slots * cpu_cycles)",
- "BriefDescription": "Frontend bound L1 topdown metric",
+ "MetricExpr": "100 * (stall_slot_frontend / (#slots * cpu_cycles))",
+ "BriefDescription": "This metric is the percentage of total slots that were stalled due to resource constraints in the frontend of the processor.",
"DefaultMetricgroupName": "TopdownL1",
"MetricGroup": "Default;TopdownL1",
"MetricName": "frontend_bound",
- "ScaleUnit": "100%"
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "(1 - op_retired / op_spec) * (1 - stall_slot / (#slots * cpu_cycles))",
- "BriefDescription": "Bad speculation L1 topdown metric",
+ "MetricExpr": "100 * ((1 - op_retired / op_spec) * (1 - stall_slot / (#slots * cpu_cycles)))",
+ "BriefDescription": "This metric is the percentage of total slots that executed operations and didn't retire due to a pipeline flush.\nThis indicates cycles that were utilized but inefficiently.",
"DefaultMetricgroupName": "TopdownL1",
"MetricGroup": "Default;TopdownL1",
"MetricName": "bad_speculation",
- "ScaleUnit": "100%"
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "(op_retired / op_spec) * (1 - stall_slot / (#slots * cpu_cycles))",
- "BriefDescription": "Retiring L1 topdown metric",
+ "MetricExpr": "100 * ((op_retired / op_spec) * (1 - stall_slot / (#slots * cpu_cycles)))",
+ "BriefDescription": "This metric is the percentage of total slots that retired operations, which indicates cycles that were utilized efficiently.",
"DefaultMetricgroupName": "TopdownL1",
"MetricGroup": "Default;TopdownL1",
"MetricName": "retiring",
- "ScaleUnit": "100%"
+ "ScaleUnit": "1percent of slots"
},
{
- "MetricExpr": "stall_slot_backend / (#slots * cpu_cycles)",
- "BriefDescription": "Backend Bound L1 topdown metric",
+ "MetricExpr": "100 * (stall_slot_backend / (#slots * cpu_cycles))",
+ "BriefDescription": "This metric is the percentage of total slots that were stalled due to resource constraints in the backend of the processor.",
"DefaultMetricgroupName": "TopdownL1",
"MetricGroup": "Default;TopdownL1",
"MetricName": "backend_bound",
- "ScaleUnit": "100%"
+ "ScaleUnit": "1percent of slots"
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/cache.json b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
index 605be14f441c..839ae26945fb 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
@@ -1,53 +1,8 @@
[
{
- "EventCode": "0x1003C",
- "EventName": "PM_EXEC_STALL_DMISS_L2L3",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3."
- },
- {
- "EventCode": "0x1E054",
- "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
- },
- {
- "EventCode": "0x34054",
- "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
- },
- {
- "EventCode": "0x34056",
- "EventName": "PM_EXEC_STALL_LOAD_FINISH",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
- },
- {
- "EventCode": "0x3006C",
- "EventName": "PM_RUN_CYC_SMT2_MODE",
- "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
- },
- {
"EventCode": "0x300F4",
"EventName": "PM_RUN_INST_CMPL_CONC",
- "BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set."
- },
- {
- "EventCode": "0x4C016",
- "EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict."
- },
- {
- "EventCode": "0x4D014",
- "EventName": "PM_EXEC_STALL_LOAD",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit."
- },
- {
- "EventCode": "0x4D016",
- "EventName": "PM_EXEC_STALL_PTESYNC",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit."
- },
- {
- "EventCode": "0x401EA",
- "EventName": "PM_THRESH_EXC_128",
- "BriefDescription": "Threshold counter exceeded a value of 128."
+ "BriefDescription": "PowerPC instruction completed by this thread when all threads in the core had the run-latch set."
},
{
"EventCode": "0x400F6",
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
index 54acb55e2c8c..e816cd10c129 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
@@ -1,7 +1,67 @@
[
{
- "EventCode": "0x4016E",
- "EventName": "PM_THRESH_NOT_MET",
- "BriefDescription": "Threshold counter did not meet threshold."
+ "EventCode": "0x100F4",
+ "EventName": "PM_FLOP_CMPL",
+ "BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops."
+ },
+ {
+ "EventCode": "0x45050",
+ "EventName": "PM_1FLOP_CMPL",
+ "BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
+ },
+ {
+ "EventCode": "0x45052",
+ "EventName": "PM_4FLOP_CMPL",
+ "BriefDescription": "Four floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
+ },
+ {
+ "EventCode": "0x45054",
+ "EventName": "PM_FMA_CMPL",
+ "BriefDescription": "Two floating point instruction completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
+ },
+ {
+ "EventCode": "0x45056",
+ "EventName": "PM_SCALAR_FLOP_CMPL",
+ "BriefDescription": "Scalar floating point instruction completed."
+ },
+ {
+ "EventCode": "0x4505A",
+ "EventName": "PM_SP_FLOP_CMPL",
+ "BriefDescription": "Single Precision floating point instruction completed."
+ },
+ {
+ "EventCode": "0x4505C",
+ "EventName": "PM_MATH_FLOP_CMPL",
+ "BriefDescription": "Math floating point instruction completed."
+ },
+ {
+ "EventCode": "0x4D052",
+ "EventName": "PM_2FLOP_CMPL",
+ "BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed."
+ },
+ {
+ "EventCode": "0x4D054",
+ "EventName": "PM_8FLOP_CMPL",
+ "BriefDescription": "Four Double Precision vector instruction completed."
+ },
+ {
+ "EventCode": "0x4D056",
+ "EventName": "PM_NON_FMA_FLOP_CMPL",
+ "BriefDescription": "Non FMA instruction completed."
+ },
+ {
+ "EventCode": "0x4D058",
+ "EventName": "PM_VECTOR_FLOP_CMPL",
+ "BriefDescription": "Vector floating point instruction completed."
+ },
+ {
+ "EventCode": "0x4D05A",
+ "EventName": "PM_NON_MATH_FLOP_CMPL",
+ "BriefDescription": "Non Math instruction completed."
+ },
+ {
+ "EventCode": "0x4D05C",
+ "EventName": "PM_DPP_FLOP_CMPL",
+ "BriefDescription": "Double-Precision or Quad-Precision instruction completed."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
index 558f9530f54e..5977f5e64212 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
@@ -1,43 +1,13 @@
[
{
- "EventCode": "0x10004",
- "EventName": "PM_EXEC_STALL_TRANSLATION",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve."
+ "EventCode": "0x1D054",
+ "EventName": "PM_DTLB_HIT_2M",
+ "BriefDescription": "Data TLB hit (DERAT reload) page size 2M. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "0x10006",
- "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
- },
- {
- "EventCode": "0x10010",
- "EventName": "PM_PMC4_OVERFLOW",
- "BriefDescription": "The event selected for PMC4 caused the event counter to overflow."
- },
- {
- "EventCode": "0x10020",
- "EventName": "PM_PMC4_REWIND",
- "BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged."
- },
- {
- "EventCode": "0x10038",
- "EventName": "PM_DISP_STALL_TRANSLATION",
- "BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss."
- },
- {
- "EventCode": "0x1003A",
- "EventName": "PM_DISP_STALL_BR_MPRED_IC_L2",
- "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict."
- },
- {
- "EventCode": "0x1D05E",
- "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
- },
- {
- "EventCode": "0x1E050",
- "EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
+ "EventCode": "0x1D058",
+ "EventName": "PM_ITLB_HIT_64K",
+ "BriefDescription": "Instruction TLB hit (IERAT reload) page size 64K. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
},
{
"EventCode": "0x1F054",
@@ -45,21 +15,6 @@
"BriefDescription": "The PTE required by the instruction was resident in the TLB (data TLB access). When MMCR1[16]=0 this event counts only demand hits. When MMCR1[16]=1 this event includes demand and prefetch. Applies to both HPT and RPT."
},
{
- "EventCode": "0x10064",
- "EventName": "PM_DISP_STALL_IC_L2",
- "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
- },
- {
- "EventCode": "0x101E8",
- "EventName": "PM_THRESH_EXC_256",
- "BriefDescription": "Threshold counter exceeded a count of 256."
- },
- {
- "EventCode": "0x101EC",
- "EventName": "PM_THRESH_MET",
- "BriefDescription": "Threshold exceeded."
- },
- {
"EventCode": "0x100F2",
"EventName": "PM_1PLUS_PPC_CMPL",
"BriefDescription": "Cycles in which at least one instruction is completed by this thread."
@@ -67,57 +22,7 @@
{
"EventCode": "0x100F6",
"EventName": "PM_IERAT_MISS",
- "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event."
- },
- {
- "EventCode": "0x100F8",
- "EventName": "PM_DISP_STALL_CYC",
- "BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)."
- },
- {
- "EventCode": "0x20006",
- "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
- },
- {
- "EventCode": "0x20114",
- "EventName": "PM_MRK_L2_RC_DISP",
- "BriefDescription": "Marked instruction RC dispatched in L2."
- },
- {
- "EventCode": "0x2C010",
- "EventName": "PM_EXEC_STALL_LSU",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions."
- },
- {
- "EventCode": "0x2C016",
- "EventName": "PM_DISP_STALL_IERAT_ONLY_MISS",
- "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss."
- },
- {
- "EventCode": "0x2C01E",
- "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3",
- "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict."
- },
- {
- "EventCode": "0x2D01A",
- "EventName": "PM_DISP_STALL_IC_MISS",
- "BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss."
- },
- {
- "EventCode": "0x2E018",
- "EventName": "PM_DISP_STALL_FETCH",
- "BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held."
- },
- {
- "EventCode": "0x2E01A",
- "EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full."
- },
- {
- "EventCode": "0x2C142",
- "EventName": "PM_MRK_XFER_FROM_SRC_PMC2",
- "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event. This event only counts instruction demand access."
},
{
"EventCode": "0x24050",
@@ -135,11 +40,6 @@
"BriefDescription": "Branch Taken instruction completed."
},
{
- "EventCode": "0x30004",
- "EventName": "PM_DISP_STALL_FLUSH",
- "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
- },
- {
"EventCode": "0x3000A",
"EventName": "PM_DISP_STALL_ITLB_MISS",
"BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
@@ -150,59 +50,24 @@
"BriefDescription": "The instruction that was next to complete (oldest in the pipeline) did not complete because it suffered a flush."
},
{
- "EventCode": "0x30014",
- "EventName": "PM_EXEC_STALL_STORE",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit."
- },
- {
- "EventCode": "0x30018",
- "EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
- },
- {
- "EventCode": "0x30026",
- "EventName": "PM_EXEC_STALL_STORE_MISS",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1."
- },
- {
- "EventCode": "0x3012A",
- "EventName": "PM_MRK_L2_RC_DONE",
- "BriefDescription": "L2 RC machine completed the transaction for the marked instruction."
- },
- {
"EventCode": "0x3F046",
"EventName": "PM_ITLB_HIT_1G",
"BriefDescription": "Instruction TLB hit (IERAT reload) page size 1G, which implies Radix Page Table translation is in use. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "0x34058",
- "EventName": "PM_DISP_STALL_BR_MPRED_ICMISS",
- "BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss."
- },
- {
- "EventCode": "0x3D05C",
- "EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
- },
- {
- "EventCode": "0x3E052",
- "EventName": "PM_DISP_STALL_IC_L3",
- "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
+ "EventCode": "0x3C05A",
+ "EventName": "PM_DTLB_HIT_64K",
+ "BriefDescription": "Data TLB hit (DERAT reload) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
"EventCode": "0x3E054",
"EventName": "PM_LD_MISS_L1",
- "BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
- },
- {
- "EventCode": "0x301EA",
- "EventName": "PM_THRESH_EXC_1024",
- "BriefDescription": "Threshold counter exceeded a value of 1024."
+ "BriefDescription": "Load missed L1, counted at finish time. LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
},
{
"EventCode": "0x300FA",
"EventName": "PM_INST_FROM_L3MISS",
- "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
+ "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss."
},
{
"EventCode": "0x40006",
@@ -210,38 +75,18 @@
"BriefDescription": "Cycles in which an instruction or group of instructions were cancelled after being issued. This event increments once per occurrence, regardless of how many instructions are included in the issue group."
},
{
- "EventCode": "0x40116",
- "EventName": "PM_MRK_LARX_FIN",
- "BriefDescription": "Marked load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
- },
- {
- "EventCode": "0x4C010",
- "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS",
- "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch."
- },
- {
- "EventCode": "0x4D01E",
- "EventName": "PM_DISP_STALL_BR_MPRED",
- "BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch."
- },
- {
- "EventCode": "0x4E010",
- "EventName": "PM_DISP_STALL_IC_L3MISS",
- "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3."
- },
- {
- "EventCode": "0x4E01A",
- "EventName": "PM_DISP_STALL_HELD_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason."
+ "EventCode": "0x44056",
+ "EventName": "PM_VECTOR_ST_CMPL",
+ "BriefDescription": "Vector store instruction completed."
},
{
- "EventCode": "0x4003C",
- "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+ "EventCode": "0x4E054",
+ "EventName": "PM_DTLB_HIT_1G",
+ "BriefDescription": "Data TLB hit (DERAT reload) page size 1G. Implies radix translation. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "0x44056",
- "EventName": "PM_VECTOR_ST_CMPL",
- "BriefDescription": "Vector store instructions completed."
+ "EventCode": "0x400FC",
+ "EventName": "PM_ITLB_MISS",
+ "BriefDescription": "Instruction TLB reload (after a miss), all page sizes. Includes only demand misses."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/marked.json b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
index 58b5dfe3a273..78f71a9eadfd 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/marked.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
@@ -1,15 +1,35 @@
[
{
- "EventCode": "0x1002C",
- "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
- "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
- },
- {
"EventCode": "0x10132",
"EventName": "PM_MRK_INST_ISSUED",
"BriefDescription": "Marked instruction issued. Note that stores always get issued twice, the address gets issued to the LSU and the data gets issued to the VSU. Also, issues can sometimes get killed/cancelled and cause multiple sequential issues for the same instruction."
},
{
+ "EventCode": "0x10134",
+ "EventName": "PM_MRK_ST_DONE_L2",
+ "BriefDescription": "Marked store completed in L2."
+ },
+ {
+ "EventCode": "0x1C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC1",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "0x1C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]."
+ },
+ {
+ "EventCode": "0x1D15C",
+ "EventName": "PM_MRK_DTLB_MISS_1G",
+ "BriefDescription": "Marked Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x1F150",
+ "EventName": "PM_MRK_ST_L2_CYC",
+ "BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion."
+ },
+ {
"EventCode": "0x101E0",
"EventName": "PM_MRK_INST_DISP",
"BriefDescription": "The thread has dispatched a randomly sampled marked instruction."
@@ -20,14 +40,39 @@
"BriefDescription": "Marked Branch Taken instruction completed."
},
{
- "EventCode": "0x20112",
- "EventName": "PM_MRK_NTF_FIN",
- "BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch."
+ "EventCode": "0x101E4",
+ "EventName": "PM_MRK_L1_ICACHE_MISS",
+ "BriefDescription": "Marked instruction suffered an instruction cache miss."
+ },
+ {
+ "EventCode": "0x101EA",
+ "EventName": "PM_MRK_L1_RELOAD_VALID",
+ "BriefDescription": "Marked demand reload."
},
{
- "EventCode": "0x2C01C",
- "EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip."
+ "EventCode": "0x20114",
+ "EventName": "PM_MRK_L2_RC_DISP",
+ "BriefDescription": "Marked instruction RC dispatched in L2."
+ },
+ {
+ "EventCode": "0x2011C",
+ "EventName": "PM_MRK_NTF_CYC",
+ "BriefDescription": "Cycles in which the marked instruction is the oldest in the pipeline (next-to-finish or next-to-complete)."
+ },
+ {
+ "EventCode": "0x20130",
+ "EventName": "PM_MRK_INST_DECODED",
+ "BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only."
+ },
+ {
+ "EventCode": "0x20132",
+ "EventName": "PM_MRK_DFU_ISSUE",
+ "BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time."
+ },
+ {
+ "EventCode": "0x20134",
+ "EventName": "PM_MRK_FXU_ISSUE",
+ "BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time."
},
{
"EventCode": "0x20138",
@@ -40,6 +85,16 @@
"BriefDescription": "Marked Branch instruction finished."
},
{
+ "EventCode": "0x2013C",
+ "EventName": "PM_MRK_FX_LSU_FIN",
+ "BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time."
+ },
+ {
+ "EventCode": "0x2C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC2",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
"EventCode": "0x2C144",
"EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC2",
"BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[15:27]."
@@ -60,19 +115,54 @@
"BriefDescription": "A marked branch completed. All branches are included."
},
{
- "EventCode": "0x200FD",
- "EventName": "PM_L1_ICACHE_MISS",
- "BriefDescription": "Demand iCache Miss."
+ "EventCode": "0x2D154",
+ "EventName": "PM_MRK_DERAT_MISS_64K",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x201E0",
+ "EventName": "PM_MRK_DATA_FROM_MEMORY",
+ "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load."
},
{
- "EventCode": "0x30130",
- "EventName": "PM_MRK_INST_FIN",
- "BriefDescription": "marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU."
+ "EventCode": "0x201E2",
+ "EventName": "PM_MRK_LD_MISS_L1",
+ "BriefDescription": "Marked demand data load miss counted at finish time."
+ },
+ {
+ "EventCode": "0x201E4",
+ "EventName": "PM_MRK_DATA_FROM_L3MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
+ },
+ {
+ "EventCode": "0x3012A",
+ "EventName": "PM_MRK_L2_RC_DONE",
+ "BriefDescription": "L2 RC machine completed the transaction for the marked instruction."
+ },
+ {
+ "EventCode": "0x3012E",
+ "EventName": "PM_MRK_DTLB_MISS_2M",
+ "BriefDescription": "Marked Data TLB reload (after a miss) page size 2M, which implies Radix Page Table translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x30132",
+ "EventName": "PM_MRK_VSU_FIN",
+ "BriefDescription": "VSU marked instruction finished. Excludes simple FX instructions issued to the Store Unit."
},
{
"EventCode": "0x34146",
"EventName": "PM_MRK_LD_CMPL",
- "BriefDescription": "Marked loads completed."
+ "BriefDescription": "Marked load instruction completed."
+ },
+ {
+ "EventCode": "0x3C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC3",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "0x3C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]."
},
{
"EventCode": "0x3E158",
@@ -82,12 +172,22 @@
{
"EventCode": "0x3E15A",
"EventName": "PM_MRK_ST_FIN",
- "BriefDescription": "The marked instruction was a store of any kind."
+ "BriefDescription": "Marked store instruction finished."
+ },
+ {
+ "EventCode": "0x3F150",
+ "EventName": "PM_MRK_ST_DRAIN_CYC",
+ "BriefDescription": "Cycles in which the marked store drained from the core to the L2."
},
{
- "EventCode": "0x30068",
- "EventName": "PM_L1_ICACHE_RELOADED_PREF",
- "BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)."
+ "EventCode": "0x30162",
+ "EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD",
+ "BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill."
+ },
+ {
+ "EventCode": "0x301E2",
+ "EventName": "PM_MRK_ST_CMPL",
+ "BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores."
},
{
"EventCode": "0x301E4",
@@ -95,48 +195,78 @@
"BriefDescription": "Marked Branch Mispredicted. Includes direction and target."
},
{
- "EventCode": "0x300F6",
- "EventName": "PM_LD_DEMAND_MISS_L1",
- "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
+ "EventCode": "0x301E6",
+ "EventName": "PM_MRK_DERAT_MISS",
+ "BriefDescription": "Marked Erat Miss (Data TLB Access) All page sizes. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x4010E",
+ "EventName": "PM_MRK_TLBIE_FIN",
+ "BriefDescription": "Marked TLBIE instruction finished. Includes TLBIE and TLBIEL instructions."
+ },
+ {
+ "EventCode": "0x40116",
+ "EventName": "PM_MRK_LARX_FIN",
+ "BriefDescription": "Marked load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "0x40132",
+ "EventName": "PM_MRK_LSU_FIN",
+ "BriefDescription": "LSU marked instruction finish."
+ },
+ {
+ "EventCode": "0x44146",
+ "EventName": "PM_MRK_STCX_CORE_CYC",
+ "BriefDescription": "Cycles spent in the core portion of a marked STCX instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
},
{
- "EventCode": "0x300FE",
- "EventName": "PM_DATA_FROM_L3MISS",
- "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
+ "EventCode": "0x4C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC4",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "0x40012",
- "EventName": "PM_L1_ICACHE_RELOADED_ALL",
- "BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
+ "EventCode": "0x4C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]."
},
{
- "EventCode": "0x40134",
- "EventName": "PM_MRK_INST_TIMEO",
- "BriefDescription": "Marked instruction finish timeout (instruction was lost)."
+ "EventCode": "0x4C15C",
+ "EventName": "PM_MRK_DERAT_MISS_1G",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 1G for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "0x4505A",
- "EventName": "PM_SP_FLOP_CMPL",
- "BriefDescription": "Single Precision floating point instructions completed."
+ "EventCode": "0x4C15E",
+ "EventName": "PM_MRK_DTLB_MISS_64K",
+ "BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "0x4D058",
- "EventName": "PM_VECTOR_FLOP_CMPL",
- "BriefDescription": "Vector floating point instructions completed."
+ "EventCode": "0x4E15E",
+ "EventName": "PM_MRK_INST_FLUSHED",
+ "BriefDescription": "The marked instruction was flushed."
},
{
- "EventCode": "0x4D05A",
- "EventName": "PM_NON_MATH_FLOP_CMPL",
- "BriefDescription": "Non Math instructions completed."
+ "EventCode": "0x40164",
+ "EventName": "PM_MRK_DERAT_MISS_2M",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
"EventCode": "0x401E0",
"EventName": "PM_MRK_INST_CMPL",
- "BriefDescription": "marked instruction completed."
+ "BriefDescription": "Marked instruction completed."
+ },
+ {
+ "EventCode": "0x401E4",
+ "EventName": "PM_MRK_DTLB_MISS",
+ "BriefDescription": "The DPTEG required for the marked load/store instruction in execution was missing from the TLB. This event only counts for demand misses."
+ },
+ {
+ "EventCode": "0x401E6",
+ "EventName": "PM_MRK_INST_FROM_L3MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
},
{
- "EventCode": "0x400FE",
- "EventName": "PM_DATA_FROM_MEMORY",
- "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
+ "EventCode": "0x401E8",
+ "EventName": "PM_MRK_DATA_FROM_L2MISS",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss for a marked instruction."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/memory.json b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
index 843b51f531e9..885262957beb 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/memory.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
@@ -1,25 +1,10 @@
[
{
- "EventCode": "0x1000A",
- "EventName": "PM_PMC3_REWIND",
- "BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged."
- },
- {
"EventCode": "0x1C040",
"EventName": "PM_XFER_FROM_SRC_PMC1",
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "0x1C142",
- "EventName": "PM_MRK_XFER_FROM_SRC_PMC1",
- "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
- },
- {
- "EventCode": "0x1C144",
- "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1",
- "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]."
- },
- {
"EventCode": "0x1C056",
"EventName": "PM_DERAT_MISS_4K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
@@ -35,24 +20,9 @@
"BriefDescription": "Data TLB reload (after a miss) page size 2M. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "0x1E056",
- "EventName": "PM_EXEC_STALL_STORE_PIPE",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions."
- },
- {
- "EventCode": "0x1F150",
- "EventName": "PM_MRK_ST_L2_CYC",
- "BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion."
- },
- {
"EventCode": "0x10062",
"EventName": "PM_LD_L3MISS_PEND_CYC",
- "BriefDescription": "Cycles L3 miss was pending for this thread."
- },
- {
- "EventCode": "0x20010",
- "EventName": "PM_PMC1_OVERFLOW",
- "BriefDescription": "The event selected for PMC1 caused the event counter to overflow."
+ "BriefDescription": "Cycles in which an L3 miss was pending for this thread."
},
{
"EventCode": "0x2001A",
@@ -80,9 +50,9 @@
"BriefDescription": "Data TLB reload (after a miss) page size 4K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "0x2D154",
- "EventName": "PM_MRK_DERAT_MISS_64K",
- "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ "EventCode": "0x2C05A",
+ "EventName": "PM_DERAT_MISS_1G",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 1G. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
"EventCode": "0x200F6",
@@ -90,9 +60,9 @@
"BriefDescription": "DERAT Reloaded to satisfy a DERAT miss. All page sizes are counted by this event. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "0x30016",
- "EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it resolve."
+ "EventCode": "0x34044",
+ "EventName": "PM_DERAT_MISS_PREF",
+ "BriefDescription": "DERAT miss (TLB access) while servicing a data prefetch."
},
{
"EventCode": "0x3C040",
@@ -100,16 +70,6 @@
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "0x3C142",
- "EventName": "PM_MRK_XFER_FROM_SRC_PMC3",
- "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
- },
- {
- "EventCode": "0x3C144",
- "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3",
- "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]."
- },
- {
"EventCode": "0x3C054",
"EventName": "PM_DERAT_MISS_16M",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
@@ -125,24 +85,14 @@
"BriefDescription": "Load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "0x301E2",
- "EventName": "PM_MRK_ST_CMPL",
- "BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores."
- },
- {
"EventCode": "0x300FC",
"EventName": "PM_DTLB_MISS",
- "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity."
- },
- {
- "EventCode": "0x4D02C",
- "EventName": "PM_PMC1_REWIND",
- "BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged."
+ "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. This event only counts for demand misses."
},
{
"EventCode": "0x4003E",
"EventName": "PM_LD_CMPL",
- "BriefDescription": "Loads completed."
+ "BriefDescription": "Load instruction completed."
},
{
"EventCode": "0x4C040",
@@ -150,16 +100,6 @@
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "0x4C142",
- "EventName": "PM_MRK_XFER_FROM_SRC_PMC4",
- "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
- },
- {
- "EventCode": "0x4C144",
- "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4",
- "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]."
- },
- {
"EventCode": "0x4C056",
"EventName": "PM_DTLB_MISS_16M",
"BriefDescription": "Data TLB reload (after a miss) page size 16M. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
@@ -168,20 +108,5 @@
"EventCode": "0x4C05A",
"EventName": "PM_DTLB_MISS_1G",
"BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
- },
- {
- "EventCode": "0x4C15E",
- "EventName": "PM_MRK_DTLB_MISS_64K",
- "BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
- },
- {
- "EventCode": "0x4D056",
- "EventName": "PM_NON_FMA_FLOP_CMPL",
- "BriefDescription": "Non FMA instruction completed."
- },
- {
- "EventCode": "0x40164",
- "EventName": "PM_MRK_DERAT_MISS_2M",
- "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
index 6f53583a0c62..4d66b75c6ad5 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
@@ -16,133 +16,139 @@
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled for any reason",
"MetricExpr": "PM_DISP_STALL_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "CPI;CPI_STALL_RATIO",
- "MetricName": "DISPATCHED_CPI"
+ "MetricName": "DISPATCH_STALL_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled because there was a flush",
"MetricExpr": "PM_DISP_STALL_FLUSH / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_FLUSH_CPI"
+ "MetricName": "DISPATCH_STALL_FLUSH_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled because Fetch was being held, so there was nothing in the pipeline for this thread",
+ "MetricExpr": "PM_DISP_STALL_FETCH / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCH_STALL_FETCH_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled because the MMU was handling a translation miss",
"MetricExpr": "PM_DISP_STALL_TRANSLATION / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_TRANSLATION_CPI"
+ "MetricName": "DISPATCH_STALL_TRANSLATION_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled waiting to resolve an instruction ERAT miss",
"MetricExpr": "PM_DISP_STALL_IERAT_ONLY_MISS / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_IERAT_ONLY_MISS_CPI"
+ "MetricName": "DISPATCH_STALL_IERAT_ONLY_MISS_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled waiting to resolve an instruction TLB miss",
"MetricExpr": "PM_DISP_STALL_ITLB_MISS / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_ITLB_MISS_CPI"
+ "MetricName": "DISPATCH_STALL_ITLB_MISS_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to an icache miss",
"MetricExpr": "PM_DISP_STALL_IC_MISS / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_IC_MISS_CPI"
+ "MetricName": "DISPATCH_STALL_IC_MISS_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from the local L2",
"MetricExpr": "PM_DISP_STALL_IC_L2 / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_IC_L2_CPI"
+ "MetricName": "DISPATCH_STALL_IC_L2_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from the local L3",
"MetricExpr": "PM_DISP_STALL_IC_L3 / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_IC_L3_CPI"
+ "MetricName": "DISPATCH_STALL_IC_L3_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from any source beyond the local L3",
"MetricExpr": "PM_DISP_STALL_IC_L3MISS / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_IC_L3MISS_CPI"
+ "MetricName": "DISPATCH_STALL_IC_L3MISS_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to an icache miss after a branch mispredict",
"MetricExpr": "PM_DISP_STALL_BR_MPRED_ICMISS / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_BR_MPRED_ICMISS_CPI"
+ "MetricName": "DISPATCH_STALL_BR_MPRED_ICMISS_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from the local L2 after suffering a branch mispredict",
"MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L2 / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_BR_MPRED_IC_L2_CPI"
+ "MetricName": "DISPATCH_STALL_BR_MPRED_IC_L2_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from the local L3 after suffering a branch mispredict",
"MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L3 / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_BR_MPRED_IC_L3_CPI"
+ "MetricName": "DISPATCH_STALL_BR_MPRED_IC_L3_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from any source beyond the local L3 after suffering a branch mispredict",
"MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L3MISS / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_BR_MPRED_IC_L3MISS_CPI"
+ "MetricName": "DISPATCH_STALL_BR_MPRED_IC_L3MISS_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to a branch mispredict",
"MetricExpr": "PM_DISP_STALL_BR_MPRED / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_BR_MPRED_CPI"
+ "MetricName": "DISPATCH_STALL_BR_MPRED_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch for any reason",
"MetricExpr": "PM_DISP_STALL_HELD_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_HELD_CPI"
+ "MetricName": "DISPATCH_STALL_HELD_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch",
"MetricExpr": "PM_DISP_STALL_HELD_SYNC_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISP_HELD_STALL_SYNC_CPI"
+ "MetricName": "DISPATCH_STALL_HELD_SYNC_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch while waiting on the scoreboard",
"MetricExpr": "PM_DISP_STALL_HELD_SCOREBOARD_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISP_HELD_STALL_SCOREBOARD_CPI"
+ "MetricName": "DISPATCH_STALL_HELD_SCOREBOARD_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch due to issue queue full",
"MetricExpr": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISP_HELD_STALL_ISSQ_FULL_CPI"
+ "MetricName": "DISPATCH_STALL_HELD_ISSQ_FULL_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the mapper/SRB was full",
"MetricExpr": "PM_DISP_STALL_HELD_RENAME_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_HELD_RENAME_CPI"
+ "MetricName": "DISPATCH_STALL_HELD_RENAME_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the STF mapper/SRB was full",
"MetricExpr": "PM_DISP_STALL_HELD_STF_MAPPER_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_HELD_STF_MAPPER_CPI"
+ "MetricName": "DISPATCH_STALL_HELD_STF_MAPPER_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the XVFC mapper/SRB was full",
"MetricExpr": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_HELD_XVFC_MAPPER_CPI"
+ "MetricName": "DISPATCH_STALL_HELD_XVFC_MAPPER_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch for any other reason",
"MetricExpr": "PM_DISP_STALL_HELD_OTHER_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_HELD_OTHER_CPI"
+ "MetricName": "DISPATCH_STALL_HELD_OTHER_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when the NTC instruction has been dispatched but not issued for any reason",
@@ -352,13 +358,13 @@
"BriefDescription": "Average cycles per completed instruction when dispatch was stalled because fetch was being held, so there was nothing in the pipeline for this thread",
"MetricExpr": "PM_DISP_STALL_FETCH / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_FETCH_CPI"
+ "MetricName": "DISPATCH_STALL_FETCH_CPI"
},
{
"BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because of power management",
"MetricExpr": "PM_DISP_STALL_HELD_HALT_CYC / PM_RUN_INST_CMPL",
"MetricGroup": "CPI",
- "MetricName": "DISPATCHED_HELD_HALT_CPI"
+ "MetricName": "DISPATCH_STALL_HELD_HALT_CPI"
},
{
"BriefDescription": "Percentage of flushes per completed instruction",
@@ -395,6 +401,13 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of completed instructions that were stores that missed the L1",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "Others",
+ "MetricName": "L1_ST_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Percentage of completed instructions when the DPTEG required for the load/store instruction in execution was missing from the TLB",
"MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL * 100",
"MetricGroup": "Others",
@@ -454,12 +467,6 @@
"MetricName": "LOADS_PER_INST"
},
{
- "BriefDescription": "Average number of finished stores per completed instruction",
- "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
- "MetricGroup": "General",
- "MetricName": "STORES_PER_INST"
- },
- {
"BriefDescription": "Percentage of demand loads that reloaded from beyond the L2 per completed instruction",
"MetricExpr": "PM_DATA_FROM_L2MISS / PM_RUN_INST_CMPL * 100",
"MetricGroup": "dL1_Reloads",
@@ -474,6 +481,13 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of ITLB misses per completed run instruction",
+ "MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "General",
+ "MetricName": "ITLB_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "Percentage of DERAT misses with 4k page size per completed instruction",
"MetricExpr": "PM_DERAT_MISS_4K / PM_RUN_INST_CMPL * 100",
"MetricGroup": "Translation",
@@ -566,7 +580,7 @@
"BriefDescription": "Average number of STCX instructions finshed per completed instruction",
"MetricExpr": "PM_STCX_FIN / PM_RUN_INST_CMPL",
"MetricGroup": "General",
- "MetricName": "STXC_PER_INST"
+ "MetricName": "STCX_PER_INST"
},
{
"BriefDescription": "Average number of LARX instructions finshed per completed instruction",
@@ -629,6 +643,13 @@
"ScaleUnit": "1%"
},
{
+ "BriefDescription": "Percentage of DERAT misses with 1G page size per completed run instruction",
+ "MetricExpr": "PM_DERAT_MISS_1G * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_1G_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
"BriefDescription": "DERAT miss ratio for 4K page size",
"MetricExpr": "PM_DERAT_MISS_4K / PM_DERAT_MISS",
"MetricGroup": "Translation",
@@ -647,6 +668,12 @@
"MetricName": "DERAT_16M_MISS_RATIO"
},
{
+ "BriefDescription": "DERAT miss ratio for 1G page size",
+ "MetricExpr": "PM_DERAT_MISS_1G / PM_DERAT_MISS",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_1G_MISS_RATIO"
+ },
+ {
"BriefDescription": "DERAT miss ratio for 64K page size",
"MetricExpr": "PM_DERAT_MISS_64K / PM_DERAT_MISS",
"MetricGroup": "Translation",
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json
index a771e4b6bec5..0e21e7ba1959 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/others.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json
@@ -1,28 +1,13 @@
[
{
- "EventCode": "0x10016",
- "EventName": "PM_VSU0_ISSUE",
- "BriefDescription": "VSU instructions issued to VSU pipe 0."
- },
- {
- "EventCode": "0x1001C",
- "EventName": "PM_ULTRAVISOR_INST_CMPL",
- "BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state."
- },
- {
- "EventCode": "0x100F0",
- "EventName": "PM_CYC",
- "BriefDescription": "Processor cycles."
- },
- {
- "EventCode": "0x10134",
- "EventName": "PM_MRK_ST_DONE_L2",
- "BriefDescription": "Marked stores completed in L2 (RC machine done)."
+ "EventCode": "0x1002C",
+ "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
+ "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
},
{
"EventCode": "0x1505E",
"EventName": "PM_LD_HIT_L1",
- "BriefDescription": "Loads that finished without experiencing an L1 miss."
+ "BriefDescription": "Load finished without experiencing an L1 miss."
},
{
"EventCode": "0x1F056",
@@ -30,9 +15,9 @@
"BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
},
{
- "EventCode": "0x1F15C",
- "EventName": "PM_MRK_STCX_L2_CYC",
- "BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)."
+ "EventCode": "0x1F05A",
+ "EventName": "PM_DISP_HELD_SYNC_CYC",
+ "BriefDescription": "Cycles dispatch is held because of a synchronizing instruction that requires the ICT to be empty before dispatch."
},
{
"EventCode": "0x10066",
@@ -40,39 +25,14 @@
"BriefDescription": "Cycles in which the thread is in Adjunct state. MSR[S HV PR] bits = 011."
},
{
- "EventCode": "0x101E4",
- "EventName": "PM_MRK_L1_ICACHE_MISS",
- "BriefDescription": "Marked Instruction suffered an icache Miss."
- },
- {
- "EventCode": "0x101EA",
- "EventName": "PM_MRK_L1_RELOAD_VALID",
- "BriefDescription": "Marked demand reload."
- },
- {
- "EventCode": "0x100F4",
- "EventName": "PM_FLOP_CMPL",
- "BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops."
- },
- {
- "EventCode": "0x100FA",
- "EventName": "PM_RUN_LATCH_ANY_THREAD_CYC",
- "BriefDescription": "Cycles when at least one thread has the run latch set."
- },
- {
"EventCode": "0x100FC",
"EventName": "PM_LD_REF_L1",
"BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included."
},
{
- "EventCode": "0x2000C",
- "EventName": "PM_RUN_LATCH_ALL_THREADS_CYC",
- "BriefDescription": "Cycles when the run latch is set for all threads."
- },
- {
"EventCode": "0x2E010",
"EventName": "PM_ADJUNCT_INST_CMPL",
- "BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state."
+ "BriefDescription": "PowerPC instruction completed while the thread was in Adjunct state."
},
{
"EventCode": "0x2E014",
@@ -80,26 +40,6 @@
"BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "0x20130",
- "EventName": "PM_MRK_INST_DECODED",
- "BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only."
- },
- {
- "EventCode": "0x20132",
- "EventName": "PM_MRK_DFU_ISSUE",
- "BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time."
- },
- {
- "EventCode": "0x20134",
- "EventName": "PM_MRK_FXU_ISSUE",
- "BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time."
- },
- {
- "EventCode": "0x2505C",
- "EventName": "PM_VSU_ISSUE",
- "BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations."
- },
- {
"EventCode": "0x2F054",
"EventName": "PM_DISP_SS1_2_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 1 dispatches either 1 or 2 instructions."
@@ -110,39 +50,14 @@
"BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions."
},
{
- "EventCode": "0x2006C",
- "EventName": "PM_RUN_CYC_SMT4_MODE",
- "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode."
- },
- {
- "EventCode": "0x201E0",
- "EventName": "PM_MRK_DATA_FROM_MEMORY",
- "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load."
- },
- {
- "EventCode": "0x201E4",
- "EventName": "PM_MRK_DATA_FROM_L3MISS",
- "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
- },
- {
- "EventCode": "0x201E8",
- "EventName": "PM_THRESH_EXC_512",
- "BriefDescription": "Threshold counter exceeded a value of 512."
- },
- {
"EventCode": "0x200F2",
"EventName": "PM_INST_DISP",
- "BriefDescription": "PowerPC instructions dispatched."
- },
- {
- "EventCode": "0x30132",
- "EventName": "PM_MRK_VSU_FIN",
- "BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit."
+ "BriefDescription": "PowerPC instruction dispatched."
},
{
- "EventCode": "0x30038",
- "EventName": "PM_EXEC_STALL_DMISS_LMEM",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory."
+ "EventCode": "0x200FD",
+ "EventName": "PM_L1_ICACHE_MISS",
+ "BriefDescription": "Demand instruction cache miss."
},
{
"EventCode": "0x3F04A",
@@ -152,12 +67,7 @@
{
"EventCode": "0x3405A",
"EventName": "PM_PRIVILEGED_INST_CMPL",
- "BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state."
- },
- {
- "EventCode": "0x3F150",
- "EventName": "PM_MRK_ST_DRAIN_CYC",
- "BriefDescription": "cycles to drain st from core to L2."
+ "BriefDescription": "PowerPC instruction completed while the thread was in Privileged state."
},
{
"EventCode": "0x3F054",
@@ -170,74 +80,29 @@
"BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions."
},
{
- "EventCode": "0x30162",
- "EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD",
- "BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill."
- },
- {
- "EventCode": "0x40114",
- "EventName": "PM_MRK_START_PROBE_NOP_DISP",
- "BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0."
- },
- {
- "EventCode": "0x4001C",
- "EventName": "PM_VSU_FIN",
- "BriefDescription": "VSU instructions finished."
- },
- {
- "EventCode": "0x4C01A",
- "EventName": "PM_EXEC_STALL_DMISS_OFF_NODE",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip."
- },
- {
- "EventCode": "0x4D012",
- "EventName": "PM_PMC3_SAVED",
- "BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged."
- },
- {
- "EventCode": "0x4D022",
- "EventName": "PM_HYPERVISOR_INST_CMPL",
- "BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state."
- },
- {
- "EventCode": "0x4D026",
- "EventName": "PM_ULTRAVISOR_CYC",
- "BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110."
+ "EventCode": "0x30068",
+ "EventName": "PM_L1_ICACHE_RELOADED_PREF",
+ "BriefDescription": "Counts all instruction cache prefetch reloads (includes demand turned into prefetch)."
},
{
- "EventCode": "0x4D028",
- "EventName": "PM_PRIVILEGED_CYC",
- "BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00."
+ "EventCode": "0x300F6",
+ "EventName": "PM_LD_DEMAND_MISS_L1",
+ "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
},
{
- "EventCode": "0x40030",
- "EventName": "PM_INST_FIN",
- "BriefDescription": "Instructions finished."
+ "EventCode": "0x300FE",
+ "EventName": "PM_DATA_FROM_L3MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
},
{
- "EventCode": "0x44146",
- "EventName": "PM_MRK_STCX_CORE_CYC",
- "BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
+ "EventCode": "0x40012",
+ "EventName": "PM_L1_ICACHE_RELOADED_ALL",
+ "BriefDescription": "Counts all instruction cache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
},
{
"EventCode": "0x44054",
"EventName": "PM_VECTOR_LD_CMPL",
- "BriefDescription": "Vector load instructions completed."
- },
- {
- "EventCode": "0x45054",
- "EventName": "PM_FMA_CMPL",
- "BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
- },
- {
- "EventCode": "0x45056",
- "EventName": "PM_SCALAR_FLOP_CMPL",
- "BriefDescription": "Scalar floating point instructions completed."
- },
- {
- "EventCode": "0x4505C",
- "EventName": "PM_MATH_FLOP_CMPL",
- "BriefDescription": "Math floating point instructions completed."
+ "BriefDescription": "Vector load instruction completed."
},
{
"EventCode": "0x4D05E",
@@ -245,28 +110,13 @@
"BriefDescription": "A branch completed. All branches are included."
},
{
- "EventCode": "0x4E15E",
- "EventName": "PM_MRK_INST_FLUSHED",
- "BriefDescription": "The marked instruction was flushed."
- },
- {
- "EventCode": "0x401E6",
- "EventName": "PM_MRK_INST_FROM_L3MISS",
- "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction."
- },
- {
- "EventCode": "0x401E8",
- "EventName": "PM_MRK_DATA_FROM_L2MISS",
- "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load."
- },
- {
"EventCode": "0x400F0",
"EventName": "PM_LD_DEMAND_MISS_L1_FIN",
- "BriefDescription": "Load Missed L1, counted at finish time."
+ "BriefDescription": "Load missed L1, counted at finish time."
},
{
- "EventCode": "0x500FA",
- "EventName": "PM_RUN_INST_CMPL",
- "BriefDescription": "Completed PowerPC instructions gated by the run latch."
+ "EventCode": "0x400FE",
+ "EventName": "PM_DATA_FROM_MEMORY",
+ "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
index b8aded6045fa..21b23bb55d0d 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
@@ -1,8 +1,13 @@
[
{
- "EventCode": "0x100FE",
- "EventName": "PM_INST_CMPL",
- "BriefDescription": "PowerPC instructions completed."
+ "EventCode": "0x10004",
+ "EventName": "PM_EXEC_STALL_TRANSLATION",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve."
+ },
+ {
+ "EventCode": "0x10006",
+ "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch for any other reason."
},
{
"EventCode": "0x1000C",
@@ -12,7 +17,7 @@
{
"EventCode": "0x1000E",
"EventName": "PM_MMA_ISSUED",
- "BriefDescription": "MMA instructions issued."
+ "BriefDescription": "MMA instruction issued."
},
{
"EventCode": "0x10012",
@@ -30,14 +35,24 @@
"BriefDescription": "Cycles in which an instruction reload is pending to satisfy a demand miss."
},
{
- "EventCode": "0x10022",
- "EventName": "PM_PMC2_SAVED",
- "BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged."
+ "EventCode": "0x10028",
+ "EventName": "PM_NTC_FLUSH",
+ "BriefDescription": "The instruction was flushed after becoming next-to-complete (NTC)."
+ },
+ {
+ "EventCode": "0x10038",
+ "EventName": "PM_DISP_STALL_TRANSLATION",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss."
+ },
+ {
+ "EventCode": "0x1003A",
+ "EventName": "PM_DISP_STALL_BR_MPRED_IC_L2",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict."
},
{
- "EventCode": "0x10024",
- "EventName": "PM_PMC5_OVERFLOW",
- "BriefDescription": "The event selected for PMC5 caused the event counter to overflow."
+ "EventCode": "0x1003C",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3."
},
{
"EventCode": "0x10058",
@@ -55,11 +70,41 @@
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
+ "EventCode": "0x1D05E",
+ "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because of power management."
+ },
+ {
+ "EventCode": "0x1E050",
+ "EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
+ },
+ {
+ "EventCode": "0x1E054",
+ "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
+ },
+ {
+ "EventCode": "0x1E056",
+ "EventName": "PM_EXEC_STALL_STORE_PIPE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions."
+ },
+ {
"EventCode": "0x1E05A",
"EventName": "PM_CMPL_STALL_LWSYNC",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
},
{
+ "EventCode": "0x1F058",
+ "EventName": "PM_DISP_HELD_CYC",
+ "BriefDescription": "Cycles dispatch is held."
+ },
+ {
+ "EventCode": "0x10064",
+ "EventName": "PM_DISP_STALL_IC_L2",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+ },
+ {
"EventCode": "0x10068",
"EventName": "PM_BR_FIN",
"BriefDescription": "A branch instruction finished. Includes predicted/mispredicted/unconditional."
@@ -70,9 +115,9 @@
"BriefDescription": "Simple fixed point instruction issued to the store unit. Measured at finish time."
},
{
- "EventCode": "0x1006C",
- "EventName": "PM_RUN_CYC_ST_MODE",
- "BriefDescription": "Cycles when the run latch is set and the core is in ST mode."
+ "EventCode": "0x100F8",
+ "EventName": "PM_DISP_STALL_CYC",
+ "BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)."
},
{
"EventCode": "0x20004",
@@ -80,9 +125,9 @@
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was dispatched but not issued yet."
},
{
- "EventCode": "0x2000A",
- "EventName": "PM_HYPERVISOR_CYC",
- "BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010."
+ "EventCode": "0x20006",
+ "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
},
{
"EventCode": "0x2000E",
@@ -90,24 +135,59 @@
"BriefDescription": "LSU Finished an internal operation in LD1 port."
},
{
+ "EventCode": "0x2C010",
+ "EventName": "PM_EXEC_STALL_LSU",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions."
+ },
+ {
"EventCode": "0x2C014",
"EventName": "PM_CMPL_STALL_SPECIAL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline required special handling before completing."
},
{
+ "EventCode": "0x2C016",
+ "EventName": "PM_DISP_STALL_IERAT_ONLY_MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss."
+ },
+ {
"EventCode": "0x2C018",
"EventName": "PM_EXEC_STALL_DMISS_L3MISS",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a source beyond the local L2 or local L3."
},
{
+ "EventCode": "0x2C01C",
+ "EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip."
+ },
+ {
+ "EventCode": "0x2C01E",
+ "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict."
+ },
+ {
"EventCode": "0x2D010",
"EventName": "PM_LSU_ST1_FIN",
"BriefDescription": "LSU Finished an internal operation in ST1 port."
},
{
+ "EventCode": "0x10016",
+ "EventName": "PM_VSU0_ISSUE",
+ "BriefDescription": "VSU instruction issued to VSU pipe 0."
+ },
+ {
"EventCode": "0x2D012",
"EventName": "PM_VSU1_ISSUE",
- "BriefDescription": "VSU instructions issued to VSU pipe 1."
+ "BriefDescription": "VSU instruction issued to VSU pipe 1."
+ },
+ {
+ "EventCode": "0x2505C",
+ "EventName": "PM_VSU_ISSUE",
+ "BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations."
+ },
+ {
+ "EventCode": "0x4001C",
+ "EventName": "PM_VSU_FIN",
+ "BriefDescription": "VSU instruction finished."
},
{
"EventCode": "0x2D018",
@@ -115,19 +195,34 @@
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the VSU (includes FXU, VSU, CRU)."
},
{
+ "EventCode": "0x2D01A",
+ "EventName": "PM_DISP_STALL_IC_MISS",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread due to an instruction cache miss."
+ },
+ {
"EventCode": "0x2D01C",
"EventName": "PM_CMPL_STALL_STCX",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
},
{
- "EventCode": "0x2E01E",
- "EventName": "PM_EXEC_STALL_NTC_FLUSH",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous NTF instruction is still completing and the new NTF instruction is stalled at dispatch."
+ "EventCode": "0x2E018",
+ "EventName": "PM_DISP_STALL_FETCH",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held."
+ },
+ {
+ "EventCode": "0x2E01A",
+ "EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the XVFC mapper/SRB was full."
+ },
+ {
+ "EventCode": "0x2E01C",
+ "EventName": "PM_EXEC_STALL_TLBIE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit."
},
{
- "EventCode": "0x2013C",
- "EventName": "PM_MRK_FX_LSU_FIN",
- "BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time."
+ "EventCode": "0x2E01E",
+ "EventName": "PM_EXEC_STALL_NTC_FLUSH",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous next-to-finish (NTF) instruction is still completing and the new NTF instruction is stalled at dispatch."
},
{
"EventCode": "0x2405A",
@@ -135,14 +230,19 @@
"BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. Note that instructions can finish out of order, therefore not all the instructions that finish have a Next-to-complete status."
},
{
- "EventCode": "0x201E2",
- "EventName": "PM_MRK_LD_MISS_L1",
- "BriefDescription": "Marked DL1 Demand Miss counted at finish time."
+ "EventCode": "0x20066",
+ "EventName": "PM_DISP_HELD_OTHER_CYC",
+ "BriefDescription": "Cycles dispatch is held for any other reason."
+ },
+ {
+ "EventCode": "0x2006A",
+ "EventName": "PM_DISP_HELD_STF_MAPPER_CYC",
+ "BriefDescription": "Cycles dispatch is held because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
},
{
- "EventCode": "0x200F4",
- "EventName": "PM_RUN_CYC",
- "BriefDescription": "Processor cycles gated by the run latch."
+ "EventCode": "0x30004",
+ "EventName": "PM_DISP_STALL_FLUSH",
+ "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet next-to-complete (NTC). PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
},
{
"EventCode": "0x30008",
@@ -150,29 +250,34 @@
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting to finish in one of the execution units (BRU, LSU, VSU). Only cycles between issue and finish are counted in this category."
},
{
- "EventCode": "0x3001A",
- "EventName": "PM_LSU_ST2_FIN",
- "BriefDescription": "LSU Finished an internal operation in ST2 port."
+ "EventCode": "0x30014",
+ "EventName": "PM_EXEC_STALL_STORE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "0x30016",
+ "EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it resolve."
},
{
- "EventCode": "0x30020",
- "EventName": "PM_PMC2_REWIND",
- "BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged."
+ "EventCode": "0x30018",
+ "EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
},
{
- "EventCode": "0x30022",
- "EventName": "PM_PMC4_SAVED",
- "BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged."
+ "EventCode": "0x3001A",
+ "EventName": "PM_LSU_ST2_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST2 port."
},
{
- "EventCode": "0x30024",
- "EventName": "PM_PMC6_OVERFLOW",
- "BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
+ "EventCode": "0x30026",
+ "EventName": "PM_EXEC_STALL_STORE_MISS",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1."
},
{
"EventCode": "0x30028",
"EventName": "PM_CMPL_STALL_MEM_ECC",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a STCX waiting for its result or a load waiting for non-critical sectors of data and ECC."
},
{
"EventCode": "0x30036",
@@ -180,6 +285,11 @@
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a simple fixed point instruction executing in the Load Store Unit."
},
{
+ "EventCode": "0x30038",
+ "EventName": "PM_EXEC_STALL_DMISS_LMEM",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCAPI cache, or local OpenCAPI memory."
+ },
+ {
"EventCode": "0x3003A",
"EventName": "PM_CMPL_STALL_EXCEPTION",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete."
@@ -187,17 +297,42 @@
{
"EventCode": "0x3F044",
"EventName": "PM_VSU2_ISSUE",
- "BriefDescription": "VSU instructions issued to VSU pipe 2."
+ "BriefDescription": "VSU instruction issued to VSU pipe 2."
},
{
"EventCode": "0x30058",
"EventName": "PM_TLBIE_FIN",
- "BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
+ "BriefDescription": "TLBIE instruction finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
},
{
- "EventCode": "0x3D058",
- "EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE",
- "BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)."
+ "EventCode": "0x34054",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
+ },
+ {
+ "EventCode": "0x34056",
+ "EventName": "PM_EXEC_STALL_LOAD_FINISH",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the next-to-finish (NTF) instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
+ },
+ {
+ "EventCode": "0x34058",
+ "EventName": "PM_DISP_STALL_BR_MPRED_ICMISS",
+ "BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss."
+ },
+ {
+ "EventCode": "0x3D05C",
+ "EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
+ },
+ {
+ "EventCode": "0x3E052",
+ "EventName": "PM_DISP_STALL_IC_L3",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
+ },
+ {
+ "EventCode": "0x30060",
+ "EventName": "PM_DISP_HELD_XVFC_MAPPER_CYC",
+ "BriefDescription": "Cycles dispatch is held because the XVFC mapper/SRB was full."
},
{
"EventCode": "0x30066",
@@ -215,9 +350,9 @@
"BriefDescription": "Cycles in which both instructions in the ICT entry pair show as finished. These are the cycles between finish and completion for the oldest pair of instructions in the pipeline."
},
{
- "EventCode": "0x40010",
- "EventName": "PM_PMC3_OVERFLOW",
- "BriefDescription": "The event selected for PMC3 caused the event counter to overflow."
+ "EventCode": "0x4C010",
+ "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch."
},
{
"EventCode": "0x4C012",
@@ -225,16 +360,36 @@
"BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered an ERAT miss and waited for it resolve."
},
{
+ "EventCode": "0x4C016",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict."
+ },
+ {
"EventCode": "0x4C018",
"EventName": "PM_CMPL_STALL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline cannot complete because the thread was blocked for any reason."
},
{
+ "EventCode": "0x4C01A",
+ "EventName": "PM_EXEC_STALL_DMISS_OFF_NODE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip."
+ },
+ {
"EventCode": "0x4C01E",
"EventName": "PM_LSU_ST3_FIN",
"BriefDescription": "LSU Finished an internal operation in ST3 port."
},
{
+ "EventCode": "0x4D014",
+ "EventName": "PM_EXEC_STALL_LOAD",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "0x4D016",
+ "EventName": "PM_EXEC_STALL_PTESYNC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit."
+ },
+ {
"EventCode": "0x4D018",
"EventName": "PM_EXEC_STALL_BRU",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Branch unit."
@@ -250,9 +405,24 @@
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIEL instruction executing in the Load Store Unit. TLBIEL instructions have lower overhead than TLBIE instructions because they don't get set to the nest."
},
{
+ "EventCode": "0x4D01E",
+ "EventName": "PM_DISP_STALL_BR_MPRED",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch."
+ },
+ {
+ "EventCode": "0x4E010",
+ "EventName": "PM_DISP_STALL_IC_L3MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3."
+ },
+ {
"EventCode": "0x4E012",
"EventName": "PM_EXEC_STALL_UNKNOWN",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the next-to-finish (NTF) instruction finishes and completions came too close together."
+ },
+ {
+ "EventCode": "0x4E01A",
+ "EventName": "PM_DISP_STALL_HELD_CYC",
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch for any reason."
},
{
"EventCode": "0x4D020",
@@ -260,24 +430,24 @@
"BriefDescription": "VSU instruction was issued to VSU pipe 3."
},
{
- "EventCode": "0x40132",
- "EventName": "PM_MRK_LSU_FIN",
- "BriefDescription": "LSU marked instruction finish."
+ "EventCode": "0x4003C",
+ "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
},
{
"EventCode": "0x45058",
"EventName": "PM_IC_MISS_CMPL",
- "BriefDescription": "Non-speculative icache miss, counted at completion."
+ "BriefDescription": "Non-speculative instruction cache miss, counted at completion."
},
{
- "EventCode": "0x4D050",
- "EventName": "PM_VSU_NON_FLOP_CMPL",
- "BriefDescription": "Non-floating point VSU instructions completed."
+ "EventCode": "0x40060",
+ "EventName": "PM_DISP_HELD_SCOREBOARD_CYC",
+ "BriefDescription": "Cycles dispatch is held while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
},
{
- "EventCode": "0x4D052",
- "EventName": "PM_2FLOP_CMPL",
- "BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed."
+ "EventCode": "0x40062",
+ "EventName": "PM_DISP_HELD_RENAME_CYC",
+ "BriefDescription": "Cycles dispatch is held because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
},
{
"EventCode": "0x400F2",
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
index b5d1bd39cfb2..c606ae03cd27 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
@@ -1,22 +1,202 @@
[
{
+ "EventCode": "0x100FE",
+ "EventName": "PM_INST_CMPL",
+ "BriefDescription": "PowerPC instruction completed."
+ },
+ {
+ "EventCode": "0x1000A",
+ "EventName": "PM_PMC3_REWIND",
+ "BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged."
+ },
+ {
+ "EventCode": "0x10010",
+ "EventName": "PM_PMC4_OVERFLOW",
+ "BriefDescription": "The event selected for PMC4 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x1001C",
+ "EventName": "PM_ULTRAVISOR_INST_CMPL",
+ "BriefDescription": "PowerPC instruction completed while the thread was in ultravisor state."
+ },
+ {
+ "EventCode": "0x100F0",
+ "EventName": "PM_CYC",
+ "BriefDescription": "Processor cycles."
+ },
+ {
+ "EventCode": "0x10020",
+ "EventName": "PM_PMC4_REWIND",
+ "BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged."
+ },
+ {
+ "EventCode": "0x10022",
+ "EventName": "PM_PMC2_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged."
+ },
+ {
+ "EventCode": "0x10024",
+ "EventName": "PM_PMC5_OVERFLOW",
+ "BriefDescription": "The event selected for PMC5 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x1002A",
+ "EventName": "PM_PMC3_HELD_CYC",
+ "BriefDescription": "Cycles when the speculative counter for PMC3 is frozen."
+ },
+ {
+ "EventCode": "0x1F15E",
+ "EventName": "PM_MRK_START_PROBE_NOP_CMPL",
+ "BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
+ },
+ {
+ "EventCode": "0x1006C",
+ "EventName": "PM_RUN_CYC_ST_MODE",
+ "BriefDescription": "Cycles when the run latch is set and the core is in ST mode."
+ },
+ {
+ "EventCode": "0x101E8",
+ "EventName": "PM_THRESH_EXC_256",
+ "BriefDescription": "Threshold counter exceeded a count of 256."
+ },
+ {
+ "EventCode": "0x101EC",
+ "EventName": "PM_THRESH_MET",
+ "BriefDescription": "Threshold exceeded."
+ },
+ {
+ "EventCode": "0x100FA",
+ "EventName": "PM_RUN_LATCH_ANY_THREAD_CYC",
+ "BriefDescription": "Cycles when at least one thread has the run latch set."
+ },
+ {
+ "EventCode": "0x2000A",
+ "EventName": "PM_HYPERVISOR_CYC",
+ "BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010."
+ },
+ {
+ "EventCode": "0x2000C",
+ "EventName": "PM_RUN_LATCH_ALL_THREADS_CYC",
+ "BriefDescription": "Cycles when the run latch is set for all threads."
+ },
+ {
+ "EventCode": "0x20010",
+ "EventName": "PM_PMC1_OVERFLOW",
+ "BriefDescription": "The event selected for PMC1 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x2006C",
+ "EventName": "PM_RUN_CYC_SMT4_MODE",
+ "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode."
+ },
+ {
+ "EventCode": "0x201E6",
+ "EventName": "PM_THRESH_EXC_32",
+ "BriefDescription": "Threshold counter exceeded a value of 32."
+ },
+ {
+ "EventCode": "0x201E8",
+ "EventName": "PM_THRESH_EXC_512",
+ "BriefDescription": "Threshold counter exceeded a value of 512."
+ },
+ {
+ "EventCode": "0x200F4",
+ "EventName": "PM_RUN_CYC",
+ "BriefDescription": "Processor cycles gated by the run latch."
+ },
+ {
+ "EventCode": "0x30010",
+ "EventName": "PM_PMC2_OVERFLOW",
+ "BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x30020",
+ "EventName": "PM_PMC2_REWIND",
+ "BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged."
+ },
+ {
+ "EventCode": "0x30022",
+ "EventName": "PM_PMC4_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged."
+ },
+ {
+ "EventCode": "0x30024",
+ "EventName": "PM_PMC6_OVERFLOW",
+ "BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x3006C",
+ "EventName": "PM_RUN_CYC_SMT2_MODE",
+ "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
+ },
+ {
"EventCode": "0x301E8",
"EventName": "PM_THRESH_EXC_64",
"BriefDescription": "Threshold counter exceeded a value of 64."
},
{
- "EventCode": "0x45050",
- "EventName": "PM_1FLOP_CMPL",
- "BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
+ "EventCode": "0x301EA",
+ "EventName": "PM_THRESH_EXC_1024",
+ "BriefDescription": "Threshold counter exceeded a value of 1024."
+ },
+ {
+ "EventCode": "0x40010",
+ "EventName": "PM_PMC3_OVERFLOW",
+ "BriefDescription": "The event selected for PMC3 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x40114",
+ "EventName": "PM_MRK_START_PROBE_NOP_DISP",
+ "BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0."
+ },
+ {
+ "EventCode": "0x4D010",
+ "EventName": "PM_PMC1_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged."
+ },
+ {
+ "EventCode": "0x4D012",
+ "EventName": "PM_PMC3_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged."
+ },
+ {
+ "EventCode": "0x4D022",
+ "EventName": "PM_HYPERVISOR_INST_CMPL",
+ "BriefDescription": "PowerPC instruction completed while the thread was in hypervisor state."
+ },
+ {
+ "EventCode": "0x4D026",
+ "EventName": "PM_ULTRAVISOR_CYC",
+ "BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110."
+ },
+ {
+ "EventCode": "0x4D028",
+ "EventName": "PM_PRIVILEGED_CYC",
+ "BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00."
+ },
+ {
+ "EventCode": "0x4D02C",
+ "EventName": "PM_PMC1_REWIND",
+ "BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged."
+ },
+ {
+ "EventCode": "0x40030",
+ "EventName": "PM_INST_FIN",
+ "BriefDescription": "Instruction finished."
+ },
+ {
+ "EventCode": "0x40134",
+ "EventName": "PM_MRK_INST_TIMEO",
+ "BriefDescription": "Marked instruction finish timeout (instruction was lost)."
},
{
- "EventCode": "0x45052",
- "EventName": "PM_4FLOP_CMPL",
- "BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
+ "EventCode": "0x401EA",
+ "EventName": "PM_THRESH_EXC_128",
+ "BriefDescription": "Threshold counter exceeded a value of 128."
},
{
- "EventCode": "0x4D054",
- "EventName": "PM_8FLOP_CMPL",
- "BriefDescription": "Four Double Precision vector instructions completed."
+ "EventCode": "0x400FA",
+ "EventName": "PM_RUN_INST_CMPL",
+ "BriefDescription": "PowerPC instruction completed while the run latch is set."
}
]
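
These event tables are plain JSON that perf's jevents build step compiles into the tool, so every EventName added above becomes usable by name with perf list and perf stat on a Power10 machine. As a minimal offline sketch, assuming a perf source checkout at the path below (an assumption, adjust to your tree), the same files can be indexed by EventName to look up an event code:

    import json
    from pathlib import Path

    # Hypothetical checkout location; adjust to where your kernel tree lives.
    EVENT_DIR = Path("tools/perf/pmu-events/arch/powerpc/power10")

    def load_events(directory):
        """Index every event in the directory's JSON files by EventName."""
        events = {}
        for json_file in directory.glob("*.json"):
            for entry in json.loads(json_file.read_text()):
                name = entry.get("EventName")
                if name:                      # metric files carry no EventName
                    events[name] = entry
        return events

    if __name__ == "__main__":
        ev = load_events(EVENT_DIR)["PM_RUN_INST_CMPL"]
        print(ev["EventCode"], "-", ev["BriefDescription"])
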
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/translation.json b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
index db3766dca07c..ea73900d248a 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
@@ -1,35 +1,10 @@
[
{
- "EventCode": "0x1F15E",
- "EventName": "PM_MRK_START_PROBE_NOP_CMPL",
- "BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
- },
- {
- "EventCode": "0x20016",
- "EventName": "PM_ST_FIN",
- "BriefDescription": "Store finish count. Includes speculative activity."
- },
- {
"EventCode": "0x20018",
"EventName": "PM_ST_FWD",
"BriefDescription": "Store forwards that finished."
},
{
- "EventCode": "0x2011C",
- "EventName": "PM_MRK_NTF_CYC",
- "BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)."
- },
- {
- "EventCode": "0x2E01C",
- "EventName": "PM_EXEC_STALL_TLBIE",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit."
- },
- {
- "EventCode": "0x201E6",
- "EventName": "PM_THRESH_EXC_32",
- "BriefDescription": "Threshold counter exceeded a value of 32."
- },
- {
"EventCode": "0x200F0",
"EventName": "PM_ST_CMPL",
"BriefDescription": "Stores completed from S2Q (2nd-level store queue). This event includes regular stores, stcx and cache inhibited stores. The following operations are excluded (pteupdate, snoop tlbie complete, store atomics, miso, load atomic payloads, tlbie, tlbsync, slbieg, isync, msgsnd, slbiag, cpabort, copy, tcheck, tend, stsync, dcbst, icbi, dcbf, hwsync, lwsync, ptesync, eieio, msgsync)."
@@ -37,21 +12,11 @@
{
"EventCode": "0x200FE",
"EventName": "PM_DATA_FROM_L2MISS",
- "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss."
- },
- {
- "EventCode": "0x30010",
- "EventName": "PM_PMC2_OVERFLOW",
- "BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
- },
- {
- "EventCode": "0x4D010",
- "EventName": "PM_PMC1_SAVED",
- "BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged."
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss."
},
{
- "EventCode": "0x4D05C",
- "EventName": "PM_DPP_FLOP_CMPL",
- "BriefDescription": "Double-Precision or Quad-Precision instructions completed."
+ "EventCode": "0x300F0",
+ "EventName": "PM_ST_MISS_L1",
+ "BriefDescription": "Store Missed L1."
}
]
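
PM_RUN_CYC and PM_RUN_INST_CMPL, both present in the pmc.json hunk above, are the usual pair for a run-latch CPI. The sketch below collects them with perf stat and computes the ratio; the CSV column layout of perf stat -x, (value, unit, event name, ...) and the exact event spellings reported back are assumptions that may need adjusting for your perf version:

    import csv
    import io
    import subprocess

    def run_latch_cpi(cmd):
        events = "PM_RUN_CYC,PM_RUN_INST_CMPL"
        proc = subprocess.run(
            ["perf", "stat", "-x,", "-e", events, "--"] + cmd,
            capture_output=True, text=True, check=True)
        counts = {}
        for row in csv.reader(io.StringIO(proc.stderr)):
            # assumed layout: value, unit, event-name, ...
            if len(row) >= 3 and row[0].strip().isdigit():
                counts[row[2].strip()] = int(row[0])
        return counts["PM_RUN_CYC"] / counts["PM_RUN_INST_CMPL"]

    if __name__ == "__main__":
        print("run-latch CPI:", run_latch_cpi(["sleep", "1"]))
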
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
index daf9458f0b77..c6780d5c456b 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
@@ -558,6 +558,7 @@
},
{
"BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -800,6 +801,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
@@ -1058,7 +1060,6 @@
},
{
"BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
@@ -1230,6 +1231,7 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
@@ -1267,6 +1269,7 @@
},
{
"BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
"MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_memory_bandwidth",
@@ -1355,7 +1358,6 @@
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@) + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / tma_info_core_core_clks",
"MetricGroup": "Flops;Ret",
"MetricName": "tma_info_core_flopc",
@@ -1363,7 +1365,6 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu_core@FP_ARITH_DISPATCHED.PORT_0@ + cpu_core@FP_ARITH_DISPATCHED.PORT_1@ + cpu_core@FP_ARITH_DISPATCHED.PORT_5@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
@@ -1769,7 +1770,6 @@
},
{
"BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire",
@@ -2002,6 +2002,7 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@) / tma_info_thread_clks",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -2375,6 +2376,7 @@
},
{
"BriefDescription": "This metric represents rate of split store accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
@@ -2405,6 +2407,7 @@
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * cpu_core@LD_BLOCKS.STORE_FORWARD@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
index 0f1628d698da..06e67e34e1bf 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
@@ -466,6 +466,7 @@
},
{
"BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_BOUND_STALLS.LOAD",
"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -682,6 +683,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
index 8fcc05c4e0a1..a6eed0d9a26d 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
@@ -85,6 +85,7 @@
},
{
"BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
@@ -319,7 +320,6 @@
},
{
"BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
@@ -464,6 +464,7 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
@@ -497,6 +498,7 @@
},
{
"BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
"MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_memory_bandwidth",
@@ -574,14 +576,12 @@
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks",
"MetricGroup": "Flops;Ret",
"MetricName": "tma_info_core_flopc"
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
@@ -927,7 +927,6 @@
},
{
"BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
@@ -1100,6 +1099,7 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -1419,6 +1419,7 @@
},
{
"BriefDescription": "This metric represents rate of split store accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
@@ -1446,6 +1447,7 @@
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
index 9bb7e3f20f7f..7082ad5ba961 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
@@ -289,6 +289,7 @@
},
{
"BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
@@ -523,7 +524,6 @@
},
{
"BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
@@ -668,6 +668,7 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
@@ -701,6 +702,7 @@
},
{
"BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
"MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_memory_bandwidth",
@@ -778,14 +780,12 @@
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks",
"MetricGroup": "Flops;Ret",
"MetricName": "tma_info_core_flopc"
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
@@ -1144,7 +1144,6 @@
},
{
"BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
@@ -1369,6 +1368,7 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -1715,6 +1715,7 @@
},
{
"BriefDescription": "This metric represents rate of split store accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
@@ -1742,6 +1743,7 @@
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 6650100830c4..3a8770e29fe8 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -19,12 +19,12 @@ GenuineIntel-6-3A,v24,ivybridge,core
GenuineIntel-6-3E,v23,ivytown,core
GenuineIntel-6-2D,v23,jaketown,core
GenuineIntel-6-(57|85),v10,knightslanding,core
-GenuineIntel-6-A[AC],v1.03,meteorlake,core
+GenuineIntel-6-A[AC],v1.04,meteorlake,core
GenuineIntel-6-1[AEF],v3,nehalemep,core
GenuineIntel-6-2E,v3,nehalemex,core
GenuineIntel-6-A7,v1.01,rocketlake,core
GenuineIntel-6-2A,v19,sandybridge,core
-GenuineIntel-6-(8F|CF),v1.14,sapphirerapids,core
+GenuineIntel-6-(8F|CF),v1.15,sapphirerapids,core
GenuineIntel-6-AF,v1.00,sierraforest,core
GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v57,skylake,core
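
mapfile.csv is how perf picks one of the per-model event directories at runtime: the first column is a regular expression over the vendor/family/model CPUID string, and the version bumps above simply record that the meteorlake and sapphirerapids tables changed. A small lookup sketch, assuming the checkout path below and that a full-string regex match approximates perf's own matching closely enough:

    import csv
    import re

    def lookup_model(cpuid, mapfile="tools/perf/pmu-events/arch/x86/mapfile.csv"):
        with open(mapfile, newline="") as f:
            for row in csv.reader(f):
                if len(row) < 4 or row[0].startswith("Family-model"):
                    continue                   # skip header and short rows
                pattern, version, model, _event_type = row[:4]
                if re.fullmatch(pattern, cpuid):
                    return model, version
        return None

    print(lookup_model("GenuineIntel-6-AA"))   # expected: ('meteorlake', 'v1.04')
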
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
index e1ae7c92f38e..1de0200b32f6 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
@@ -37,6 +37,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALLS",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Number of L1D misses that are outstanding",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
@@ -261,6 +270,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles when L1D is locked",
+ "EventCode": "0x42",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
@@ -515,6 +533,17 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
"Data_LA": "1",
"EventCode": "0xd2",
@@ -731,6 +760,14 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "EventCode": "0x44",
+ "EventName": "MEM_STORE_RETIRED.L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of load ops retired.",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -978,6 +1015,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cacheable and Non-Cacheable code read requests",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and Non-Cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Demand Data Read requests sent to uncore",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
@@ -996,6 +1042,89 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
"EventCode": "0x2c",
"EventName": "SQ_MISC.BUS_LOCK",
@@ -1005,6 +1134,42 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to an icache miss",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json b/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
index 616489f0974a..f66506ee37ef 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
@@ -42,6 +42,14 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
index 0f064518d1c0..8264419500a5 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
@@ -44,6 +44,14 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "DSB_FILL.FB_STALL_OT",
+ "EventCode": "0x62",
+ "EventName": "DSB_FILL.FB_STALL_OT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired ANT branches",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_ANT",
@@ -56,6 +64,30 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
@@ -89,6 +121,18 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
@@ -244,6 +288,18 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
index 67e949b4c789..2605e1d0ba9f 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
@@ -67,6 +67,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"CounterMask": "3",
"EventCode": "0x47",
@@ -96,6 +105,35 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "MEMORY_ORDERING.MD_NUKE",
+ "EventCode": "0x09",
+ "EventName": "MEMORY_ORDERING.MD_NUKE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of memory ordering machine clears due to memory renaming.",
+ "EventCode": "0x09",
+ "EventName": "MEMORY_ORDERING.MRN_NUKE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "53",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
"Data_LA": "1",
"EventCode": "0xcd",
@@ -122,6 +160,19 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_2048",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "23",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
"Data_LA": "1",
"EventCode": "0xcd",
@@ -235,5 +286,34 @@
"SampleAfterValue": "100003",
"UMask": "0x10",
"Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where data return is pending for a Demand Data Read request who miss L3 cache.",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where the core is waiting on at least 6 outstanding demand data read requests known to have missed the L3 cache.",
+ "CounterMask": "6",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "PublicDescription": "Cycles where the core is waiting on at least 6 outstanding demand data read requests known to have missed the L3 cache. Note that this event does not capture all elapsed cycles while the requests are outstanding - only cycles from when the requests were known to have missed the L3 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
}
]
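
The added MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024/GT_2048 entries extend the existing cumulative-threshold family, so the number of loads in a latency band is just the difference of two thresholds. Note that each threshold carries its own SampleAfterValue, so the subtraction only makes sense once the sampled counts have been scaled to estimated totals; the figures below are made up for illustration:

    def latency_band(counts, low, high):
        """counts maps threshold -> estimated loads with latency > threshold."""
        return counts[low] - counts.get(high, 0)

    estimated = {128: 9000, 256: 3100, 512: 900, 1024: 120, 2048: 10}
    print(latency_band(estimated, 128, 256))   # loads with 128 < latency <= 256
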
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/other.json b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
index 2ec57f487525..f4c603599df4 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
@@ -1,5 +1,13 @@
[
{
+ "BriefDescription": "ASSISTS.PAGE_FAULT",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.PAGE_FAULT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts streaming stores that have any type of response.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
@@ -31,6 +39,14 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "RS.EMPTY_RESOURCE",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state)",
"EventCode": "0x75",
"EventName": "SERIALIZATION.C01_MS_SCB",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
index eeaa7a97f71c..352c5efafc06 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
@@ -312,6 +312,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RETURN",
@@ -330,6 +340,33 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C01",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C02",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C0_WAIT",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x70",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
@@ -362,6 +399,24 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
@@ -603,6 +658,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired NOP instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PublicDescription": "Counts all retired NOP or ENDBR32/64 or PREFETCHIT0/1 instructions",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "1",
@@ -612,6 +676,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Iterations of Repeat string retired instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.REP_ITERATION",
+ "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
"CounterMask": "1",
"EventCode": "0xad",
@@ -622,6 +695,17 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Clears speculative count",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
"EventCode": "0xad",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
@@ -631,6 +715,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.RAT_STALLS",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
"EventCode": "0xad",
"EventName": "INT_MISC.RECOVERY_CYCLES",
@@ -734,6 +827,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
@@ -743,6 +845,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x88",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
@@ -752,6 +863,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"CounterMask": "1",
"EventCode": "0xa8",
@@ -824,6 +944,24 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "LFENCE instructions retired",
+ "EventCode": "0xe0",
+ "EventName": "MISC2_RETIRED.LFENCE",
+ "PublicDescription": "number of LFENCE retired instructions",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SCOREBOARD",
@@ -1261,6 +1399,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles with retired uop(s).",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.CYCLES",
+ "PublicDescription": "Counts cycles where at least one uop has retired.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired uops except the last uop of each instruction.",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.HEAVY",
@@ -1307,6 +1455,17 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles with less than 10 actually retired uops.",
"CounterMask": "10",
"EventCode": "0xc2",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
index 1bb9cededa56..a0191c8b708d 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
@@ -85,6 +85,7 @@
},
{
"BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
@@ -319,7 +320,6 @@
},
{
"BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
@@ -464,6 +464,7 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
@@ -497,6 +498,7 @@
},
{
"BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
"MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_memory_bandwidth",
@@ -574,14 +576,12 @@
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks",
"MetricGroup": "Flops;Ret",
"MetricName": "tma_info_core_flopc"
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
@@ -933,7 +933,6 @@
},
{
"BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
@@ -1126,6 +1125,7 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -1445,6 +1445,7 @@
},
{
"BriefDescription": "This metric represents rate of split store accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
@@ -1472,6 +1473,7 @@
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
index 31b6be9fb8c7..442ef3807a9d 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
@@ -77,6 +77,24 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_SOCKET_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700C00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703C00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
index c207c851a9f9..222212abd811 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
@@ -553,7 +553,6 @@
},
{
"BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector + tma_fp_amx",
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
@@ -717,6 +716,7 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
@@ -750,6 +750,7 @@
},
{
"BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
"MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_memory_bandwidth",
@@ -827,14 +828,12 @@
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + FP_ARITH_INST_RETIRED2.SCALAR_HALF + 2 * (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF) + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * (FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@) + 16 * (FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16",
"MetricGroup": "Flops;Ret",
"MetricName": "tma_info_core_flopc"
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
@@ -1216,7 +1215,6 @@
},
{
"BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
@@ -1467,6 +1465,7 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -1841,6 +1840,7 @@
},
{
"BriefDescription": "This metric represents rate of split store accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
@@ -1868,6 +1868,7 @@
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 94cb38540b5a..2795a404bb58 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -923,7 +923,7 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.DATA_READ / UNC_ARB_TRK_OCCUPANCY.DATA_READ@thresh\\=1@",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.DATA_READ / UNC_ARB_TRK_OCCUPANCY.DATA_READ@cmask\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
index c7c2d6ab1a93..fab084e1bc69 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
@@ -79,6 +79,7 @@
},
{
"BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
@@ -313,7 +314,6 @@
},
{
"BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
@@ -458,6 +458,7 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
@@ -491,6 +492,7 @@
},
{
"BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
"MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_memory_bandwidth",
@@ -568,14 +570,12 @@
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks",
"MetricGroup": "Flops;Ret",
"MetricName": "tma_info_core_flopc"
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
@@ -927,7 +927,6 @@
},
{
"BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
@@ -1114,6 +1113,7 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -1433,6 +1433,7 @@
},
{
"BriefDescription": "This metric represents rate of split store accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
@@ -1460,6 +1461,7 @@
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c
index a630c617e879..12bd043a05e3 100644
--- a/tools/perf/pmu-events/empty-pmu-events.c
+++ b/tools/perf/pmu-events/empty-pmu-events.c
@@ -266,19 +266,53 @@ static const struct pmu_sys_events pmu_sys_event_tables[] = {
},
};
-int pmu_events_table_for_each_event(const struct pmu_events_table *table, pmu_event_iter_fn fn,
- void *data)
+int pmu_events_table__for_each_event(const struct pmu_events_table *table, struct perf_pmu *pmu,
+ pmu_event_iter_fn fn, void *data)
{
for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
- int ret = fn(pe, table, data);
+ int ret;
+ if (pmu && !pmu__name_match(pmu, pe->pmu))
+ continue;
+
+ ret = fn(pe, table, data);
if (ret)
return ret;
}
return 0;
}
-int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
+int pmu_events_table__find_event(const struct pmu_events_table *table,
+ struct perf_pmu *pmu,
+ const char *name,
+ pmu_event_iter_fn fn,
+ void *data)
+{
+ for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
+ if (pmu && !pmu__name_match(pmu, pe->pmu))
+ continue;
+
+ if (!strcasecmp(pe->name, name))
+ return fn(pe, table, data);
+ }
+ return -1000;
+}
+
+size_t pmu_events_table__num_events(const struct pmu_events_table *table,
+ struct perf_pmu *pmu)
+{
+ size_t count = 0;
+
+ for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
+ if (pmu && !pmu__name_match(pmu, pe->pmu))
+ continue;
+
+ count++;
+ }
+ return count;
+}
+
+int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
void *data)
{
for (const struct pmu_metric *pm = &table->entries[0]; pm->metric_expr; pm++) {
@@ -371,7 +405,8 @@ const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const
int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
for (const struct pmu_events_map *tables = &pmu_events_map[0]; tables->arch; tables++) {
- int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);
+ int ret = pmu_events_table__for_each_event(&tables->event_table,
+ /*pmu=*/ NULL, fn, data);
if (ret)
return ret;
@@ -384,7 +419,7 @@ int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
for (const struct pmu_events_map *tables = &pmu_events_map[0];
tables->arch;
tables++) {
- int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);
+ int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
if (ret)
return ret;
@@ -408,7 +443,7 @@ int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
tables->name;
tables++) {
- int ret = pmu_events_table_for_each_event(&tables->table, fn, data);
+ int ret = pmu_events_table__for_each_event(&tables->table, /*pmu=*/ NULL, fn, data);
if (ret)
return ret;
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index 12e80bb7939b..a7e88332276d 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -42,7 +42,7 @@ _metricgroups = {}
# Order specific JsonEvent attributes will be visited.
_json_event_attributes = [
# cmp_sevent related attributes.
- 'name', 'pmu', 'topic', 'desc',
+ 'name', 'topic', 'desc',
# Seems useful, put it early.
'event',
# Short things in alphabetical order.
@@ -53,7 +53,7 @@ _json_event_attributes = [
# Attributes that are in pmu_metric rather than pmu_event.
_json_metric_attributes = [
- 'pmu', 'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
+ 'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
'default_metricgroup_name', 'aggr_mode', 'event_grouping'
]
@@ -113,13 +113,24 @@ class BigCString:
strings: Set[str]
big_string: Sequence[str]
offsets: Dict[str, int]
+ insert_number: int
+ insert_point: Dict[str, int]
+ metrics: Set[str]
def __init__(self):
self.strings = set()
+ self.insert_number = 0;
+ self.insert_point = {}
+ self.metrics = set()
- def add(self, s: str) -> None:
+ def add(self, s: str, metric: bool) -> None:
"""Called to add to the big string."""
- self.strings.add(s)
+ if s not in self.strings:
+ self.strings.add(s)
+ self.insert_point[s] = self.insert_number
+ self.insert_number += 1
+ if metric:
+ self.metrics.add(s)
def compute(self) -> None:
"""Called once all strings are added to compute the string and offsets."""
@@ -160,8 +171,11 @@ class BigCString:
self.big_string = []
self.offsets = {}
+ def string_cmp_key(s: str) -> Tuple[bool, int, str]:
+ return (s in self.metrics, self.insert_point[s], s)
+
# Emit all strings that aren't folded in a sorted manner.
- for s in sorted(self.strings):
+ for s in sorted(self.strings, key=string_cmp_key):
if s not in folded_strings:
self.offsets[s] = big_string_offset
self.big_string.append(f'/* offset={big_string_offset} */ "')
@@ -252,7 +266,7 @@ class JsonEvent:
def unit_to_pmu(unit: str) -> Optional[str]:
"""Convert a JSON Unit to Linux PMU name."""
if not unit:
- return None
+ return 'default_core'
# Comment brought over from jevents.c:
# it's not realistic to keep adding these, we need something more scalable ...
table = {
@@ -274,6 +288,7 @@ class JsonEvent:
'DFPMC': 'amd_df',
'cpu_core': 'cpu_core',
'cpu_atom': 'cpu_atom',
+ 'ali_drw': 'ali_drw',
}
return table[unit] if unit in table else f'uncore_{unit.lower()}'
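
The practical effect of the hunk above is that events whose JSON lacks a Unit are now attributed to a "default_core" PMU name instead of None. A standalone sketch of that mapping, using only table entries visible in this diff (the real table in jevents.py is longer):

    from typing import Optional

    def unit_to_pmu(unit: Optional[str]) -> str:
        # Events without a Unit now fall back to the synthetic "default_core" PMU.
        if not unit:
            return 'default_core'
        table = {  # abbreviated; the full table lives in jevents.py
            'DFPMC': 'amd_df',
            'cpu_core': 'cpu_core',
            'cpu_atom': 'cpu_atom',
            'ali_drw': 'ali_drw',
        }
        # Unknown units keep the historical uncore_<unit> naming.
        return table.get(unit, f'uncore_{unit.lower()}')

    assert unit_to_pmu(None) == 'default_core'
    assert unit_to_pmu('cpu_atom') == 'cpu_atom'
    assert unit_to_pmu('iMC') == 'uncore_imc'
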
@@ -342,16 +357,15 @@ class JsonEvent:
self.desc += extra_desc
if self.long_desc and extra_desc:
self.long_desc += extra_desc
- if self.pmu:
- if self.desc and not self.desc.endswith('. '):
- self.desc += '. '
- self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
- if arch_std and arch_std.lower() in _arch_std_events:
- event = _arch_std_events[arch_std.lower()].event
- # Copy from the architecture standard event to self for undefined fields.
- for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
- if hasattr(self, attr) and not getattr(self, attr):
- setattr(self, attr, value)
+ if arch_std:
+ if arch_std.lower() in _arch_std_events:
+ event = _arch_std_events[arch_std.lower()].event
+ # Copy from the architecture standard event to self for undefined fields.
+ for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
+ if hasattr(self, attr) and not getattr(self, attr):
+ setattr(self, attr, value)
+ else:
+ raise argparse.ArgumentTypeError('Cannot find arch std event:', arch_std)
self.event = real_event(self.name, event)
@@ -433,13 +447,13 @@ def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
def print_pending_events() -> None:
"""Optionally close events table."""
- def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
+ def event_cmp_key(j: JsonEvent) -> Tuple[str, str, bool, str, str]:
def fix_none(s: Optional[str]) -> str:
if s is None:
return ''
return s
- return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu),
+ return (fix_none(j.pmu).replace(',','_'), fix_none(j.name), j.desc is not None, fix_none(j.topic),
fix_none(j.metric_name))
global _pending_events
@@ -454,13 +468,36 @@ def print_pending_events() -> None:
global event_tables
_event_tables.append(_pending_events_tblname)
- _args.output_file.write(
- f'static const struct compact_pmu_event {_pending_events_tblname}[] = {{\n')
-
+ first = True
+ last_pmu = None
+ pmus = set()
for event in sorted(_pending_events, key=event_cmp_key):
+ if event.pmu != last_pmu:
+ if not first:
+ _args.output_file.write('};\n')
+ pmu_name = event.pmu.replace(',', '_')
+ _args.output_file.write(
+ f'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n')
+ first = False
+ last_pmu = event.pmu
+ pmus.add((event.pmu, pmu_name))
+
_args.output_file.write(event.to_c_string(metric=False))
_pending_events = []
+ _args.output_file.write(f"""
+}};
+
+const struct pmu_table_entry {_pending_events_tblname}[] = {{
+""")
+ for (pmu, tbl_pmu) in sorted(pmus):
+ pmu_name = f"{pmu}\\000"
+ _args.output_file.write(f"""{{
+ .entries = {_pending_events_tblname}_{tbl_pmu},
+ .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}),
+ .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
+}},
+""")
_args.output_file.write('};\n\n')
def print_pending_metrics() -> None:
@@ -486,13 +523,36 @@ def print_pending_metrics() -> None:
global metric_tables
_metric_tables.append(_pending_metrics_tblname)
- _args.output_file.write(
- f'static const struct compact_pmu_event {_pending_metrics_tblname}[] = {{\n')
-
+ first = True
+ last_pmu = None
+ pmus = set()
for metric in sorted(_pending_metrics, key=metric_cmp_key):
+ if metric.pmu != last_pmu:
+ if not first:
+ _args.output_file.write('};\n')
+ pmu_name = metric.pmu.replace(',', '_')
+ _args.output_file.write(
+ f'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n')
+ first = False
+ last_pmu = metric.pmu
+ pmus.add((metric.pmu, pmu_name))
+
_args.output_file.write(metric.to_c_string(metric=True))
_pending_metrics = []
+ _args.output_file.write(f"""
+}};
+
+const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
+""")
+ for (pmu, tbl_pmu) in sorted(pmus):
+ pmu_name = f"{pmu}\\000"
+ _args.output_file.write(f"""{{
+ .entries = {_pending_metrics_tblname}_{tbl_pmu},
+ .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}),
+ .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
+}},
+""")
_args.output_file.write('};\n\n')
def get_topic(topic: str) -> str:
@@ -521,17 +581,20 @@ def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
assert len(mgroup) > 1, parents
description = f"{metricgroup_descriptions[mgroup]}\\000"
mgroup = f"{mgroup}\\000"
- _bcs.add(mgroup)
- _bcs.add(description)
+ _bcs.add(mgroup, metric=True)
+ _bcs.add(description, metric=True)
_metricgroups[mgroup] = description
return
topic = get_topic(item.name)
for event in read_json_events(item.path, topic):
+ pmu_name = f"{event.pmu}\\000"
if event.name:
- _bcs.add(event.build_c_string(metric=False))
+ _bcs.add(pmu_name, metric=False)
+ _bcs.add(event.build_c_string(metric=False), metric=False)
if event.metric_name:
- _bcs.add(event.build_c_string(metric=True))
+ _bcs.add(pmu_name, metric=True)
+ _bcs.add(event.build_c_string(metric=True), metric=True)
def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
"""Process a JSON file during the main walk."""
@@ -573,14 +636,14 @@ def print_mapping_table(archs: Sequence[str]) -> None:
_args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
- const struct compact_pmu_event *entries;
- size_t length;
+ const struct pmu_table_entry *pmus;
+ uint32_t num_pmus;
};
/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
- const struct compact_pmu_event *entries;
- size_t length;
+ const struct pmu_table_entry *pmus;
+ uint32_t num_pmus;
};
/*
@@ -610,12 +673,12 @@ const struct pmu_events_map pmu_events_map[] = {
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.event_table = {
-\t\t.entries = pmu_events__test_soc_cpu,
-\t\t.length = ARRAY_SIZE(pmu_events__test_soc_cpu),
+\t\t.pmus = pmu_events__test_soc_cpu,
+\t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
\t},
\t.metric_table = {
-\t\t.entries = pmu_metrics__test_soc_cpu,
-\t\t.length = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
+\t\t.pmus = pmu_metrics__test_soc_cpu,
+\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
\t}
},
""")
@@ -645,12 +708,12 @@ const struct pmu_events_map pmu_events_map[] = {
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.event_table = {{
-\t\t.entries = {event_tblname},
-\t\t.length = {event_size}
+\t\t.pmus = {event_tblname},
+\t\t.num_pmus = {event_size}
\t}},
\t.metric_table = {{
-\t\t.entries = {metric_tblname},
-\t\t.length = {metric_size}
+\t\t.pmus = {metric_tblname},
+\t\t.num_pmus = {metric_size}
\t}}
}},
""")
@@ -681,15 +744,15 @@ static const struct pmu_sys_events pmu_sys_event_tables[] = {
for tblname in _sys_event_tables:
_args.output_file.write(f"""\t{{
\t\t.event_table = {{
-\t\t\t.entries = {tblname},
-\t\t\t.length = ARRAY_SIZE({tblname})
+\t\t\t.pmus = {tblname},
+\t\t\t.num_pmus = ARRAY_SIZE({tblname})
\t\t}},""")
metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
if metric_tblname in _sys_metric_tables:
_args.output_file.write(f"""
\t\t.metric_table = {{
-\t\t\t.entries = {metric_tblname},
-\t\t\t.length = ARRAY_SIZE({metric_tblname})
+\t\t\t.pmus = {metric_tblname},
+\t\t\t.num_pmus = ARRAY_SIZE({metric_tblname})
\t\t}},""")
printed_metric_tables.append(metric_tblname)
_args.output_file.write(f"""
@@ -749,15 +812,18 @@ static void decompress_metric(int offset, struct pmu_metric *pm)
_args.output_file.write('\twhile (*p++);')
_args.output_file.write("""}
-int pmu_events_table_for_each_event(const struct pmu_events_table *table,
- pmu_event_iter_fn fn,
- void *data)
+static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
+ const struct pmu_table_entry *pmu,
+ pmu_event_iter_fn fn,
+ void *data)
{
- for (size_t i = 0; i < table->length; i++) {
- struct pmu_event pe;
- int ret;
+ int ret;
+ struct pmu_event pe = {
+ .pmu = &big_c_string[pmu->pmu_name.offset],
+ };
- decompress_event(table->entries[i].offset, &pe);
+ for (uint32_t i = 0; i < pmu->num_entries; i++) {
+ decompress_event(pmu->entries[i].offset, &pe);
if (!pe.name)
continue;
ret = fn(&pe, table, data);
@@ -765,17 +831,119 @@ int pmu_events_table_for_each_event(const struct pmu_events_table *table,
return ret;
}
return 0;
+ }
+
+static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
+ const struct pmu_table_entry *pmu,
+ const char *name,
+ pmu_event_iter_fn fn,
+ void *data)
+{
+ struct pmu_event pe = {
+ .pmu = &big_c_string[pmu->pmu_name.offset],
+ };
+ int low = 0, high = pmu->num_entries - 1;
+
+ while (low <= high) {
+ int cmp, mid = (low + high) / 2;
+
+ decompress_event(pmu->entries[mid].offset, &pe);
+
+ if (!pe.name && !name)
+ goto do_call;
+
+ if (!pe.name && name) {
+ low = mid + 1;
+ continue;
+ }
+ if (pe.name && !name) {
+ high = mid - 1;
+ continue;
+ }
+
+ cmp = strcasecmp(pe.name, name);
+ if (cmp < 0) {
+ low = mid + 1;
+ continue;
+ }
+ if (cmp > 0) {
+ high = mid - 1;
+ continue;
+ }
+ do_call:
+ return fn ? fn(&pe, table, data) : 0;
+ }
+ return -1000;
}
-int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table,
- pmu_metric_iter_fn fn,
- void *data)
+int pmu_events_table__for_each_event(const struct pmu_events_table *table,
+ struct perf_pmu *pmu,
+ pmu_event_iter_fn fn,
+ void *data)
+{
+ for (size_t i = 0; i < table->num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &table->pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
+ int ret;
+
+ if (pmu && !pmu__name_match(pmu, pmu_name))
+ continue;
+
+ ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
+ if (pmu || ret)
+ return ret;
+ }
+ return 0;
+}
+
+int pmu_events_table__find_event(const struct pmu_events_table *table,
+ struct perf_pmu *pmu,
+ const char *name,
+ pmu_event_iter_fn fn,
+ void *data)
{
- for (size_t i = 0; i < table->length; i++) {
- struct pmu_metric pm;
+ for (size_t i = 0; i < table->num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &table->pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
int ret;
- decompress_metric(table->entries[i].offset, &pm);
+ if (!pmu__name_match(pmu, pmu_name))
+ continue;
+
+ ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
+ if (ret != -1000)
+ return ret;
+ }
+ return -1000;
+}
+
+size_t pmu_events_table__num_events(const struct pmu_events_table *table,
+ struct perf_pmu *pmu)
+{
+ size_t count = 0;
+
+ for (size_t i = 0; i < table->num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &table->pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
+
+ if (pmu__name_match(pmu, pmu_name))
+ count += table_pmu->num_entries;
+ }
+ return count;
+}
+
+static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
+ const struct pmu_table_entry *pmu,
+ pmu_metric_iter_fn fn,
+ void *data)
+{
+ int ret;
+ struct pmu_metric pm = {
+ .pmu = &big_c_string[pmu->pmu_name.offset],
+ };
+
+ for (uint32_t i = 0; i < pmu->num_entries; i++) {
+ decompress_metric(pmu->entries[i].offset, &pm);
if (!pm.metric_expr)
continue;
ret = fn(&pm, table, data);
@@ -785,11 +953,25 @@ int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table,
return 0;
}
+int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
+ pmu_metric_iter_fn fn,
+ void *data)
+{
+ for (size_t i = 0; i < table->num_pmus; i++) {
+ int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
+ fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
const struct pmu_events_table *table = NULL;
char *cpuid = perf_pmu__getcpuid(pmu);
- int i;
+ size_t i;
/* on some platforms which uses cpus map, cpuid can be NULL for
* PMUs other than CORE PMUs.
@@ -809,7 +991,17 @@ const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
}
}
free(cpuid);
- return table;
+ if (!pmu)
+ return table;
+
+ for (i = 0; i < table->num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &table->pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
+
+ if (pmu__name_match(pmu, pmu_name))
+ return table;
+ }
+ return NULL;
}
const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
@@ -866,7 +1058,8 @@ int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
for (const struct pmu_events_map *tables = &pmu_events_map[0];
tables->arch;
tables++) {
- int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);
+ int ret = pmu_events_table__for_each_event(&tables->event_table,
+ /*pmu=*/ NULL, fn, data);
if (ret)
return ret;
@@ -879,7 +1072,7 @@ int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
for (const struct pmu_events_map *tables = &pmu_events_map[0];
tables->arch;
tables++) {
- int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);
+ int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
if (ret)
return ret;
@@ -903,7 +1096,8 @@ int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
tables->name;
tables++) {
- int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);
+ int ret = pmu_events_table__for_each_event(&tables->event_table,
+ /*pmu=*/ NULL, fn, data);
if (ret)
return ret;
@@ -916,7 +1110,7 @@ int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
tables->name;
tables++) {
- int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);
+ int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
if (ret)
return ret;
@@ -999,14 +1193,20 @@ such as "arm/cortex-a34".''',
_args = ap.parse_args()
_args.output_file.write("""
-#include "pmu-events/pmu-events.h"
+#include <pmu-events/pmu-events.h>
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>
struct compact_pmu_event {
- int offset;
+ int offset;
+};
+
+struct pmu_table_entry {
+ const struct compact_pmu_event *entries;
+ uint32_t num_entries;
+ struct compact_pmu_event pmu_name;
};
""")
diff --git a/tools/perf/pmu-events/metric.py b/tools/perf/pmu-events/metric.py
index 85a3545f5b6a..0e9ec65d92ae 100644
--- a/tools/perf/pmu-events/metric.py
+++ b/tools/perf/pmu-events/metric.py
@@ -413,6 +413,10 @@ def has_event(event: Event) -> Function:
# pylint: disable=invalid-name
return Function('has_event', event)
+def strcmp_cpuid_str(event: str) -> Function:
+ # pylint: disable=redefined-builtin
+ # pylint: disable=invalid-name
+ return Function('strcmp_cpuid_str', event)
class Metric:
"""An individual metric that will specifiable on the perf command line."""
@@ -541,14 +545,23 @@ def ParsePerfJson(orig: str) -> Expression:
"""
# pylint: disable=eval-used
py = orig.strip()
+ # First try to convert everything that looks like a string (event name) into Event(r"EVENT_NAME").
+ # This isn't very selective, so it is followed up by converting some unwanted conversions back again
py = re.sub(r'([a-zA-Z][^-+/\* \\\(\),]*(?:\\.[^-+/\* \\\(\),]*)*)',
r'Event(r"\1")', py)
+ # If it started with a # it should have been a literal, rather than an event name
py = re.sub(r'#Event\(r"([^"]*)"\)', r'Literal("#\1")', py)
+ # Convert accidentally converted hex constants ("0Event(r"xDEADBEEF")") back to a constant,
+ # but keep it wrapped in Event(), otherwise Python drops the 0x prefix and it gets interpreted as
+ # a double by the Bison parser
+ py = re.sub(r'0Event\(r"[xX]([0-9a-fA-F]*)"\)', r'Event("0x\1")', py)
+ # Convert accidentally converted scientific notation constants back
py = re.sub(r'([0-9]+)Event\(r"(e[0-9]+)"\)', r'\1\2', py)
- keywords = ['if', 'else', 'min', 'max', 'd_ratio', 'source_count', 'has_event']
+ # Convert all the known keywords back from events to just the keyword
+ keywords = ['if', 'else', 'min', 'max', 'd_ratio', 'source_count', 'has_event', 'strcmp_cpuid_str',
+ 'cpuid_not_more_than']
for kw in keywords:
py = re.sub(rf'Event\(r"{kw}"\)', kw, py)
-
try:
parsed = ast.parse(py, mode='eval')
except SyntaxError as e:
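
As a sanity check on the comments above, this is a small standalone reproduction of the same substitution steps; the regexes are copied from ParsePerfJson as shown in the hunk, while the surrounding scaffolding is only illustrative:

    import re

    # A toy metric expression: a hex constant plus a call to a known keyword.
    py = '0x1e + has_event(cycles)'

    # Wrap anything that looks like an event name in Event(r"...").
    py = re.sub(r'([a-zA-Z][^-+/\* \\\(\),]*(?:\\.[^-+/\* \\\(\),]*)*)',
                r'Event(r"\1")', py)
    # Undo unwanted wrappings: '#' literals, hex constants, scientific notation, keywords.
    py = re.sub(r'#Event\(r"([^"]*)"\)', r'Literal("#\1")', py)
    py = re.sub(r'0Event\(r"[xX]([0-9a-fA-F]*)"\)', r'Event("0x\1")', py)
    py = re.sub(r'([0-9]+)Event\(r"(e[0-9]+)"\)', r'\1\2', py)
    for kw in ['if', 'else', 'min', 'max', 'd_ratio', 'source_count', 'has_event',
               'strcmp_cpuid_str', 'cpuid_not_more_than']:
        py = re.sub(rf'Event\(r"{kw}"\)', kw, py)

    print(py)  # -> Event("0x1e") + has_event(Event(r"cycles"))
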
diff --git a/tools/perf/pmu-events/pmu-events.h b/tools/perf/pmu-events/pmu-events.h
index caf59f23cd64..f5aa96f1685c 100644
--- a/tools/perf/pmu-events/pmu-events.h
+++ b/tools/perf/pmu-events/pmu-events.h
@@ -3,6 +3,7 @@
#define PMU_EVENTS_H
#include <stdbool.h>
+#include <stddef.h>
struct perf_pmu;
@@ -77,9 +78,19 @@ typedef int (*pmu_metric_iter_fn)(const struct pmu_metric *pm,
const struct pmu_metrics_table *table,
void *data);
-int pmu_events_table_for_each_event(const struct pmu_events_table *table, pmu_event_iter_fn fn,
+int pmu_events_table__for_each_event(const struct pmu_events_table *table,
+ struct perf_pmu *pmu,
+ pmu_event_iter_fn fn,
void *data);
-int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
+int pmu_events_table__find_event(const struct pmu_events_table *table,
+ struct perf_pmu *pmu,
+ const char *name,
+ pmu_event_iter_fn fn,
+ void *data);
+size_t pmu_events_table__num_events(const struct pmu_events_table *table,
+ struct perf_pmu *pmu);
+
+int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
void *data);
const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu);
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build
index 7d0e33ce6aba..5b0b5ff7e14a 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Build
@@ -1,3 +1,4 @@
perf-y += Context.o
-CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs
+# -Wno-declaration-after-statement: The python headers have mixed code with declarations (decls after asserts, for instance)
+CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-declaration-after-statement
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
index 7384dcb628c4..b75d31858e54 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -54,6 +54,7 @@ try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
+ 'aarch64': audit.MACH_AARCH64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
@@ -73,9 +74,9 @@ try:
except:
if not audit_package_warned:
audit_package_warned = True
- print("Install the audit-libs-python package to get syscall names.\n"
- "For example:\n # apt-get install python-audit (Ubuntu)"
- "\n # yum install audit-libs-python (Fedora)"
+ print("Install the python-audit package to get syscall names.\n"
+ "For example:\n # apt-get install python3-audit (Ubuntu)"
+ "\n # yum install python3-audit (Fedora)"
"\n etc.\n")
def syscall_name(id):
diff --git a/tools/perf/scripts/python/bin/gecko-record b/tools/perf/scripts/python/bin/gecko-record
new file mode 100644
index 000000000000..f0d1aa55f171
--- /dev/null
+++ b/tools/perf/scripts/python/bin/gecko-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -F 99 -g "$@"
diff --git a/tools/perf/scripts/python/bin/gecko-report b/tools/perf/scripts/python/bin/gecko-report
new file mode 100755
index 000000000000..1867ec8d9757
--- /dev/null
+++ b/tools/perf/scripts/python/bin/gecko-report
@@ -0,0 +1,7 @@
+#!/bin/bash
+# description: create firefox gecko profile json format from perf.data
+if [ "$*" = "-i -" ]; then
+perf script -s "$PERF_EXEC_PATH"/scripts/python/gecko.py
+else
+perf script -s "$PERF_EXEC_PATH"/scripts/python/gecko.py -- "$@"
+fi
diff --git a/tools/perf/scripts/python/gecko.py b/tools/perf/scripts/python/gecko.py
new file mode 100644
index 000000000000..bc5a72f94bfa
--- /dev/null
+++ b/tools/perf/scripts/python/gecko.py
@@ -0,0 +1,395 @@
+# gecko.py - Convert perf record output to Firefox's gecko profile format
+# SPDX-License-Identifier: GPL-2.0
+#
+# The script converts perf.data to Gecko Profile Format,
+# which can be read by https://profiler.firefox.com/.
+#
+# Usage:
+#
+# perf record -a -g -F 99 sleep 60
+# perf script report gecko
+#
+# Combined:
+#
+# perf script gecko -F 99 -a sleep 60
+
+import os
+import sys
+import time
+import json
+import string
+import random
+import argparse
+import threading
+import webbrowser
+import urllib.parse
+from os import system
+from functools import reduce
+from dataclasses import dataclass, field
+from http.server import HTTPServer, SimpleHTTPRequestHandler, test
+from typing import List, Dict, Optional, NamedTuple, Set, Tuple, Any
+
+# Add the Perf-Trace-Util library to the Python path
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+StringID = int
+StackID = int
+FrameID = int
+CategoryID = int
+Milliseconds = float
+
+# start_time is initialized only once for all event traces.
+start_time = None
+
+# https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/profile.js#L425
+# Follow Brendan Gregg's Flamegraph convention: orange for kernel and yellow for user space by default.
+CATEGORIES = None
+
+# The product name is used by the profiler UI to show the Operating system and Processor.
+PRODUCT = os.popen('uname -op').read().strip()
+
+# store the output file
+output_file = None
+
+# Here key = tid, value = Thread
+tid_to_thread = dict()
+
+# The HTTP server is used to serve the profile to the profiler UI.
+http_server_thread = None
+
+# The category index is used by the profiler UI to show the color of the flame graph.
+USER_CATEGORY_INDEX = 0
+KERNEL_CATEGORY_INDEX = 1
+
+# https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L156
+class Frame(NamedTuple):
+ string_id: StringID
+ relevantForJS: bool
+ innerWindowID: int
+ implementation: None
+ optimizations: None
+ line: None
+ column: None
+ category: CategoryID
+ subcategory: int
+
+# https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L216
+class Stack(NamedTuple):
+ prefix_id: Optional[StackID]
+ frame_id: FrameID
+
+# https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L90
+class Sample(NamedTuple):
+ stack_id: Optional[StackID]
+ time_ms: Milliseconds
+ responsiveness: int
+
+@dataclass
+class Thread:
+ """A builder for a profile of the thread.
+
+ Attributes:
+ comm: Thread command-line (name).
+ pid: process ID of containing process.
+ tid: thread ID.
+ samples: Timeline of profile samples.
+ frameTable: interned stack frame ID -> stack frame.
+ stringTable: interned string ID -> string.
+ stringMap: interned string -> string ID.
+ stackTable: interned stack ID -> stack.
+ stackMap: (stack prefix ID, leaf stack frame ID) -> interned Stack ID.
+ frameMap: Stack Frame string -> interned Frame ID.
+ """
+ comm: str
+ pid: int
+ tid: int
+ samples: List[Sample] = field(default_factory=list)
+ frameTable: List[Frame] = field(default_factory=list)
+ stringTable: List[str] = field(default_factory=list)
+ stringMap: Dict[str, int] = field(default_factory=dict)
+ stackTable: List[Stack] = field(default_factory=list)
+ stackMap: Dict[Tuple[Optional[int], int], int] = field(default_factory=dict)
+ frameMap: Dict[str, int] = field(default_factory=dict)
+
+ def _intern_stack(self, frame_id: int, prefix_id: Optional[int]) -> int:
+ """Gets a matching stack, or saves the new stack. Returns a Stack ID."""
+ key = f"{frame_id}" if prefix_id is None else f"{frame_id},{prefix_id}"
+ # key = (prefix_id, frame_id)
+ stack_id = self.stackMap.get(key)
+ if stack_id is None:
+ # return stack_id
+ stack_id = len(self.stackTable)
+ self.stackTable.append(Stack(prefix_id=prefix_id, frame_id=frame_id))
+ self.stackMap[key] = stack_id
+ return stack_id
+
+ def _intern_string(self, string: str) -> int:
+ """Gets a matching string, or saves the new string. Returns a String ID."""
+ string_id = self.stringMap.get(string)
+ if string_id is not None:
+ return string_id
+ string_id = len(self.stringTable)
+ self.stringTable.append(string)
+ self.stringMap[string] = string_id
+ return string_id
+
+ def _intern_frame(self, frame_str: str) -> int:
+ """Gets a matching stack frame, or saves the new frame. Returns a Frame ID."""
+ frame_id = self.frameMap.get(frame_str)
+ if frame_id is not None:
+ return frame_id
+ frame_id = len(self.frameTable)
+ self.frameMap[frame_str] = frame_id
+ string_id = self._intern_string(frame_str)
+
+ symbol_name_to_category = KERNEL_CATEGORY_INDEX if frame_str.find('kallsyms') != -1 \
+ or frame_str.find('/vmlinux') != -1 \
+ or frame_str.endswith('.ko)') \
+ else USER_CATEGORY_INDEX
+
+ self.frameTable.append(Frame(
+ string_id=string_id,
+ relevantForJS=False,
+ innerWindowID=0,
+ implementation=None,
+ optimizations=None,
+ line=None,
+ column=None,
+ category=symbol_name_to_category,
+ subcategory=None,
+ ))
+ return frame_id
+
+ def _add_sample(self, comm: str, stack: List[str], time_ms: Milliseconds) -> None:
+ """Add a timestamped stack trace sample to the thread builder.
+ Args:
+ comm: command-line (name) of the thread at this sample
+ stack: sampled stack frames. Root first, leaf last.
+ time_ms: timestamp of sample in milliseconds.
+ """
+ # Threads may not set their names right after they are created.
+ # Instead, they might do it later. In such situations, use the latest name they have set.
+ if self.comm != comm:
+ self.comm = comm
+
+ prefix_stack_id = reduce(lambda prefix_id, frame: self._intern_stack
+ (self._intern_frame(frame), prefix_id), stack, None)
+ if prefix_stack_id is not None:
+ self.samples.append(Sample(stack_id=prefix_stack_id,
+ time_ms=time_ms,
+ responsiveness=0))
+
+ def _to_json_dict(self) -> Dict:
+ """Converts current Thread to GeckoThread JSON format."""
+ # Gecko profile format is row-oriented data as List[List],
+ # And a schema for interpreting each index.
+ # Schema:
+ # https://github.com/firefox-devtools/profiler/blob/main/docs-developer/gecko-profile-format.md
+ # https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L230
+ return {
+ "tid": self.tid,
+ "pid": self.pid,
+ "name": self.comm,
+ # https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L51
+ "markers": {
+ "schema": {
+ "name": 0,
+ "startTime": 1,
+ "endTime": 2,
+ "phase": 3,
+ "category": 4,
+ "data": 5,
+ },
+ "data": [],
+ },
+
+ # https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L90
+ "samples": {
+ "schema": {
+ "stack": 0,
+ "time": 1,
+ "responsiveness": 2,
+ },
+ "data": self.samples
+ },
+
+ # https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L156
+ "frameTable": {
+ "schema": {
+ "location": 0,
+ "relevantForJS": 1,
+ "innerWindowID": 2,
+ "implementation": 3,
+ "optimizations": 4,
+ "line": 5,
+ "column": 6,
+ "category": 7,
+ "subcategory": 8,
+ },
+ "data": self.frameTable,
+ },
+
+ # https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L216
+ "stackTable": {
+ "schema": {
+ "prefix": 0,
+ "frame": 1,
+ },
+ "data": self.stackTable,
+ },
+ "stringTable": self.stringTable,
+ "registerTime": 0,
+ "unregisterTime": None,
+ "processType": "default",
+ }
+
+# Uses perf script python interface to parse each
+# event and store the data in the thread builder.
+def process_event(param_dict: Dict) -> None:
+ global start_time
+ global tid_to_thread
+ time_stamp = (param_dict['sample']['time'] // 1000) / 1000
+ pid = param_dict['sample']['pid']
+ tid = param_dict['sample']['tid']
+ comm = param_dict['comm']
+
+ # Start time is the time of the first sample
+ if not start_time:
+ start_time = time_stamp
+
+ # Parse and append the callchain of the current sample into a stack.
+ stack = []
+ if param_dict['callchain']:
+ for call in param_dict['callchain']:
+ if 'sym' not in call:
+ continue
+ stack.append(f'{call["sym"]["name"]} (in {call["dso"]})')
+ if len(stack) != 0:
+ # Reverse the stack, so the root comes first and the leaf at the end.
+ stack = stack[::-1]
+
+ # During perf record if -g is not used, the callchain is not available.
+ # In that case, the symbol and dso are available in the event parameters.
+ else:
+ func = param_dict['symbol'] if 'symbol' in param_dict else '[unknown]'
+ dso = param_dict['dso'] if 'dso' in param_dict else '[unknown]'
+ stack.append(f'{func} (in {dso})')
+
+ # Add sample to the specific thread.
+ thread = tid_to_thread.get(tid)
+ if thread is None:
+ thread = Thread(comm=comm, pid=pid, tid=tid)
+ tid_to_thread[tid] = thread
+ thread._add_sample(comm=comm, stack=stack, time_ms=time_stamp)
+
+def trace_begin() -> None:
+ global output_file
+ if (output_file is None):
+ print("Staring Firefox Profiler on your default browser...")
+ global http_server_thread
+ http_server_thread = threading.Thread(target=test, args=(CORSRequestHandler, HTTPServer,))
+ http_server_thread.daemon = True
+ http_server_thread.start()
+
+# trace_end runs at the end and aggregates the data into the final json
+# object, then writes it out to the output file.
+def trace_end() -> None:
+ global output_file
+ threads = [thread._to_json_dict() for thread in tid_to_thread.values()]
+
+ # Schema: https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L305
+ gecko_profile_with_meta = {
+ "meta": {
+ "interval": 1,
+ "processType": 0,
+ "product": PRODUCT,
+ "stackwalk": 1,
+ "debug": 0,
+ "gcpoison": 0,
+ "asyncstack": 1,
+ "startTime": start_time,
+ "shutdownTime": None,
+ "version": 24,
+ "presymbolicated": True,
+ "categories": CATEGORIES,
+ "markerSchema": [],
+ },
+ "libs": [],
+ "threads": threads,
+ "processes": [],
+ "pausedRanges": [],
+ }
+ # Launch the profiler via localhost if --save-only was not specified, otherwise write to the given file
+ if (output_file is None):
+ output_file = 'gecko_profile.json'
+ with open(output_file, 'w') as f:
+ json.dump(gecko_profile_with_meta, f, indent=2)
+ launchFirefox(output_file)
+ time.sleep(1)
+ print(f'[ perf gecko: Captured and wrote into {output_file} ]')
+ else:
+ print(f'[ perf gecko: Captured and wrote into {output_file} ]')
+ with open(output_file, 'w') as f:
+ json.dump(gecko_profile_with_meta, f, indent=2)
+
+# Used to enable Cross-Origin Resource Sharing (CORS) for requests coming from 'https://profiler.firefox.com', allowing it to access resources from this server.
+class CORSRequestHandler(SimpleHTTPRequestHandler):
+ def end_headers (self):
+ self.send_header('Access-Control-Allow-Origin', 'https://profiler.firefox.com')
+ SimpleHTTPRequestHandler.end_headers(self)
+
+# Start a local server to serve the gecko_profile.json file to profiler.firefox.com
+def launchFirefox(file):
+ safe_string = urllib.parse.quote_plus(f'http://localhost:8000/{file}')
+ url = 'https://profiler.firefox.com/from-url/' + safe_string
+ webbrowser.open(f'{url}')
+
+def main() -> None:
+ global output_file
+ global CATEGORIES
+ parser = argparse.ArgumentParser(description="Convert perf.data to Firefox\'s Gecko Profile format which can be uploaded to profiler.firefox.com for visualization")
+
+ # Add the command-line options
+ # Colors must be defined according to this:
+ # https://github.com/firefox-devtools/profiler/blob/50124adbfa488adba6e2674a8f2618cf34b59cd2/res/css/categories.css
+ parser.add_argument('--user-color', default='yellow', help='Color for the User category', choices=['yellow', 'blue', 'purple', 'green', 'orange', 'red', 'grey', 'magenta'])
+ parser.add_argument('--kernel-color', default='orange', help='Color for the Kernel category', choices=['yellow', 'blue', 'purple', 'green', 'orange', 'red', 'grey', 'magenta'])
+ # If --save-only is specified, the output will be saved to a file instead of opening Firefox's profiler directly.
+ parser.add_argument('--save-only', help='Save the output to a file instead of opening Firefox\'s profiler')
+
+ # Parse the command-line arguments
+ args = parser.parse_args()
+ # Access the values provided by the user
+ user_color = args.user_color
+ kernel_color = args.kernel_color
+ output_file = args.save_only
+
+ CATEGORIES = [
+ {
+ "name": 'User',
+ "color": user_color,
+ "subcategories": ['Other']
+ },
+ {
+ "name": 'Kernel',
+ "color": kernel_color,
+ "subcategories": ['Other']
+ },
+ ]
+
+if __name__ == '__main__':
+ main()
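
For orientation, a minimal sketch of the row-oriented thread layout the script produces: every table pairs a schema (column name to index) with a data list of rows. All values here are made up for illustration and the frame table is omitted:

    import json

    # One thread's samples in the gecko profile's row-oriented layout.
    thread = {
        "tid": 1234,
        "pid": 1234,
        "name": "sleep",
        "samples": {
            "schema": {"stack": 0, "time": 1, "responsiveness": 2},
            "data": [
                [0, 1000.25, 0],   # stack id 0 sampled at 1000.25 ms
                [1, 1010.35, 0],
            ],
        },
        "stackTable": {
            "schema": {"prefix": 0, "frame": 1},
            "data": [
                [None, 0],  # stack 0: frame 0 with no prefix (root)
                [0, 1],     # stack 1: frame 1 whose caller is stack 0
            ],
        },
        "stringTable": ["main (in /usr/bin/sleep)", "do_work (in /usr/bin/sleep)"],
    }

    print(json.dumps(thread, indent=2))
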
diff --git a/tools/perf/tests/.gitignore b/tools/perf/tests/.gitignore
deleted file mode 100644
index d053b325f728..000000000000
--- a/tools/perf/tests/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-llvm-src-base.c
-llvm-src-kbuild.c
-llvm-src-prologue.c
-llvm-src-relocation.c
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index fb9ac5dc4079..63d5e6d5f165 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -37,8 +37,6 @@ perf-y += sample-parsing.o
perf-y += parse-no-sample-id-all.o
perf-y += kmod-path.o
perf-y += thread-map.o
-perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o llvm-src-relocation.o
-perf-y += bpf.o
perf-y += topology.o
perf-y += mem.o
perf-y += cpumap.o
@@ -51,7 +49,6 @@ perf-y += sdt.o
perf-y += is_printable_array.o
perf-y += bitmap.o
perf-y += perf-hooks.o
-perf-y += clang.o
perf-y += unit_number__scnprintf.o
perf-y += mem2node.o
perf-y += maps.o
@@ -70,34 +67,6 @@ perf-y += sigtrap.o
perf-y += event_groups.o
perf-y += symbols.o
-$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
- $(call rule_mkdir)
- $(Q)echo '#include <tests/llvm.h>' > $@
- $(Q)echo 'const char test_llvm__bpf_base_prog[] =' >> $@
- $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
- $(Q)echo ';' >> $@
-
-$(OUTPUT)tests/llvm-src-kbuild.c: tests/bpf-script-test-kbuild.c tests/Build
- $(call rule_mkdir)
- $(Q)echo '#include <tests/llvm.h>' > $@
- $(Q)echo 'const char test_llvm__bpf_test_kbuild_prog[] =' >> $@
- $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
- $(Q)echo ';' >> $@
-
-$(OUTPUT)tests/llvm-src-prologue.c: tests/bpf-script-test-prologue.c tests/Build
- $(call rule_mkdir)
- $(Q)echo '#include <tests/llvm.h>' > $@
- $(Q)echo 'const char test_llvm__bpf_test_prologue_prog[] =' >> $@
- $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
- $(Q)echo ';' >> $@
-
-$(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/Build
- $(call rule_mkdir)
- $(Q)echo '#include <tests/llvm.h>' > $@
- $(Q)echo 'const char test_llvm__bpf_test_relocation[] =' >> $@
- $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
- $(Q)echo ';' >> $@
-
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c
deleted file mode 100644
index b638cc99d5ae..000000000000
--- a/tools/perf/tests/bpf-script-example.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bpf-script-example.c
- * Test basic LLVM building
- */
-#ifndef LINUX_VERSION_CODE
-# error Need LINUX_VERSION_CODE
-# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
-#endif
-#define BPF_ANY 0
-#define BPF_MAP_TYPE_ARRAY 2
-#define BPF_FUNC_map_lookup_elem 1
-#define BPF_FUNC_map_update_elem 2
-
-static void *(*bpf_map_lookup_elem)(void *map, void *key) =
- (void *) BPF_FUNC_map_lookup_elem;
-static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) =
- (void *) BPF_FUNC_map_update_elem;
-
-/*
- * Following macros are taken from tools/lib/bpf/bpf_helpers.h,
- * and are used to create BTF defined maps. It is easier to take
- * 2 simple macros, than being able to include above header in
- * runtime.
- *
- * __uint - defines integer attribute of BTF map definition,
- * Such attributes are represented using a pointer to an array,
- * in which dimensionality of array encodes specified integer
- * value.
- *
- * __type - defines pointer variable with typeof(val) type for
- * attributes like key or value, which will be defined by the
- * size of the type.
- */
-#define __uint(name, val) int (*name)[val]
-#define __type(name, val) typeof(val) *name
-
-#define SEC(NAME) __attribute__((section(NAME), used))
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
- __type(key, int);
- __type(value, int);
-} flip_table SEC(".maps");
-
-SEC("syscalls:sys_enter_epoll_pwait")
-int bpf_func__SyS_epoll_pwait(void *ctx)
-{
- int ind =0;
- int *flag = bpf_map_lookup_elem(&flip_table, &ind);
- int new_flag;
- if (!flag)
- return 0;
- /* flip flag and store back */
- new_flag = !*flag;
- bpf_map_update_elem(&flip_table, &ind, &new_flag, BPF_ANY);
- return new_flag;
-}
-char _license[] SEC("license") = "GPL";
-int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/perf/tests/bpf-script-test-kbuild.c b/tools/perf/tests/bpf-script-test-kbuild.c
deleted file mode 100644
index 219673aa278f..000000000000
--- a/tools/perf/tests/bpf-script-test-kbuild.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bpf-script-test-kbuild.c
- * Test include from kernel header
- */
-#ifndef LINUX_VERSION_CODE
-# error Need LINUX_VERSION_CODE
-# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
-#endif
-#define SEC(NAME) __attribute__((section(NAME), used))
-
-#include <uapi/linux/fs.h>
-
-SEC("func=vfs_llseek")
-int bpf_func__vfs_llseek(void *ctx)
-{
- return 0;
-}
-
-char _license[] SEC("license") = "GPL";
-int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/perf/tests/bpf-script-test-prologue.c b/tools/perf/tests/bpf-script-test-prologue.c
deleted file mode 100644
index 91778b5c6125..000000000000
--- a/tools/perf/tests/bpf-script-test-prologue.c
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bpf-script-test-prologue.c
- * Test BPF prologue
- */
-#ifndef LINUX_VERSION_CODE
-# error Need LINUX_VERSION_CODE
-# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
-#endif
-#define SEC(NAME) __attribute__((section(NAME), used))
-
-#include <uapi/linux/fs.h>
-
-/*
- * If CONFIG_PROFILE_ALL_BRANCHES is selected,
- * 'if' is redefined after include kernel header.
- * Recover 'if' for BPF object code.
- */
-#ifdef if
-# undef if
-#endif
-
-typedef unsigned int __bitwise fmode_t;
-
-#define FMODE_READ 0x1
-#define FMODE_WRITE 0x2
-
-static void (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
- (void *) 6;
-
-SEC("func=null_lseek file->f_mode offset orig")
-int bpf_func__null_lseek(void *ctx, int err, unsigned long _f_mode,
- unsigned long offset, unsigned long orig)
-{
- fmode_t f_mode = (fmode_t)_f_mode;
-
- if (err)
- return 0;
- if (f_mode & FMODE_WRITE)
- return 0;
- if (offset & 1)
- return 0;
- if (orig == SEEK_CUR)
- return 0;
- return 1;
-}
-
-char _license[] SEC("license") = "GPL";
-int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/perf/tests/bpf-script-test-relocation.c b/tools/perf/tests/bpf-script-test-relocation.c
deleted file mode 100644
index 74006e4b2d24..000000000000
--- a/tools/perf/tests/bpf-script-test-relocation.c
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bpf-script-test-relocation.c
- * Test BPF loader checking relocation
- */
-#ifndef LINUX_VERSION_CODE
-# error Need LINUX_VERSION_CODE
-# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
-#endif
-#define BPF_ANY 0
-#define BPF_MAP_TYPE_ARRAY 2
-#define BPF_FUNC_map_lookup_elem 1
-#define BPF_FUNC_map_update_elem 2
-
-static void *(*bpf_map_lookup_elem)(void *map, void *key) =
- (void *) BPF_FUNC_map_lookup_elem;
-static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) =
- (void *) BPF_FUNC_map_update_elem;
-
-struct bpf_map_def {
- unsigned int type;
- unsigned int key_size;
- unsigned int value_size;
- unsigned int max_entries;
-};
-
-#define SEC(NAME) __attribute__((section(NAME), used))
-struct bpf_map_def SEC("maps") my_table = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 1,
-};
-
-int this_is_a_global_val;
-
-SEC("func=sys_write")
-int bpf_func__sys_write(void *ctx)
-{
- int key = 0;
- int value = 0;
-
- /*
- * Incorrect relocation. Should not allow this program be
- * loaded into kernel.
- */
- bpf_map_update_elem(&this_is_a_global_val, &key, &value, 0);
- return 0;
-}
-char _license[] SEC("license") = "GPL";
-int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
deleted file mode 100644
index 8beb46066034..000000000000
--- a/tools/perf/tests/bpf.c
+++ /dev/null
@@ -1,389 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/epoll.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <util/record.h>
-#include <util/util.h>
-#include <util/bpf-loader.h>
-#include <util/evlist.h>
-#include <linux/filter.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <api/fs/fs.h>
-#include <perf/mmap.h>
-#include "tests.h"
-#include "llvm.h"
-#include "debug.h"
-#include "parse-events.h"
-#include "util/mmap.h"
-#define NR_ITERS 111
-#define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"
-
-#if defined(HAVE_LIBBPF_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
-#include <linux/bpf.h>
-#include <bpf/bpf.h>
-
-static int epoll_pwait_loop(void)
-{
- int i;
-
- /* Should fail NR_ITERS times */
- for (i = 0; i < NR_ITERS; i++)
- epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
- return 0;
-}
-
-#ifdef HAVE_BPF_PROLOGUE
-
-static int llseek_loop(void)
-{
- int fds[2], i;
-
- fds[0] = open("/dev/null", O_RDONLY);
- fds[1] = open("/dev/null", O_RDWR);
-
- if (fds[0] < 0 || fds[1] < 0)
- return -1;
-
- for (i = 0; i < NR_ITERS; i++) {
- lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
- lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
- }
- close(fds[0]);
- close(fds[1]);
- return 0;
-}
-
-#endif
-
-static struct {
- enum test_llvm__testcase prog_id;
- const char *name;
- const char *msg_compile_fail;
- const char *msg_load_fail;
- int (*target_func)(void);
- int expect_result;
- bool pin;
-} bpf_testcase_table[] = {
- {
- .prog_id = LLVM_TESTCASE_BASE,
- .name = "[basic_bpf_test]",
- .msg_compile_fail = "fix 'perf test LLVM' first",
- .msg_load_fail = "load bpf object failed",
- .target_func = &epoll_pwait_loop,
- .expect_result = (NR_ITERS + 1) / 2,
- },
- {
- .prog_id = LLVM_TESTCASE_BASE,
- .name = "[bpf_pinning]",
- .msg_compile_fail = "fix kbuild first",
- .msg_load_fail = "check your vmlinux setting?",
- .target_func = &epoll_pwait_loop,
- .expect_result = (NR_ITERS + 1) / 2,
- .pin = true,
- },
-#ifdef HAVE_BPF_PROLOGUE
- {
- .prog_id = LLVM_TESTCASE_BPF_PROLOGUE,
- .name = "[bpf_prologue_test]",
- .msg_compile_fail = "fix kbuild first",
- .msg_load_fail = "check your vmlinux setting?",
- .target_func = &llseek_loop,
- .expect_result = (NR_ITERS + 1) / 4,
- },
-#endif
-};
-
-static int do_test(struct bpf_object *obj, int (*func)(void),
- int expect)
-{
- struct record_opts opts = {
- .target = {
- .uid = UINT_MAX,
- .uses_mmap = true,
- },
- .freq = 0,
- .mmap_pages = 256,
- .default_interval = 1,
- };
-
- char pid[16];
- char sbuf[STRERR_BUFSIZE];
- struct evlist *evlist;
- int i, ret = TEST_FAIL, err = 0, count = 0;
-
- struct parse_events_state parse_state;
- struct parse_events_error parse_error;
-
- parse_events_error__init(&parse_error);
- bzero(&parse_state, sizeof(parse_state));
- parse_state.error = &parse_error;
- INIT_LIST_HEAD(&parse_state.list);
-
- err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL);
- parse_events_error__exit(&parse_error);
- if (err == -ENODATA) {
- pr_debug("Failed to add events selected by BPF, debuginfo package not installed\n");
- return TEST_SKIP;
- }
- if (err || list_empty(&parse_state.list)) {
- pr_debug("Failed to add events selected by BPF\n");
- return TEST_FAIL;
- }
-
- snprintf(pid, sizeof(pid), "%d", getpid());
- pid[sizeof(pid) - 1] = '\0';
- opts.target.tid = opts.target.pid = pid;
-
- /* Instead of evlist__new_default, don't add default events */
- evlist = evlist__new();
- if (!evlist) {
- pr_debug("Not enough memory to create evlist\n");
- return TEST_FAIL;
- }
-
- err = evlist__create_maps(evlist, &opts.target);
- if (err < 0) {
- pr_debug("Not enough memory to create thread/cpu maps\n");
- goto out_delete_evlist;
- }
-
- evlist__splice_list_tail(evlist, &parse_state.list);
-
- evlist__config(evlist, &opts, NULL);
-
- err = evlist__open(evlist);
- if (err < 0) {
- pr_debug("perf_evlist__open: %s\n",
- str_error_r(errno, sbuf, sizeof(sbuf)));
- goto out_delete_evlist;
- }
-
- err = evlist__mmap(evlist, opts.mmap_pages);
- if (err < 0) {
- pr_debug("evlist__mmap: %s\n",
- str_error_r(errno, sbuf, sizeof(sbuf)));
- goto out_delete_evlist;
- }
-
- evlist__enable(evlist);
- (*func)();
- evlist__disable(evlist);
-
- for (i = 0; i < evlist->core.nr_mmaps; i++) {
- union perf_event *event;
- struct mmap *md;
-
- md = &evlist->mmap[i];
- if (perf_mmap__read_init(&md->core) < 0)
- continue;
-
- while ((event = perf_mmap__read_event(&md->core)) != NULL) {
- const u32 type = event->header.type;
-
- if (type == PERF_RECORD_SAMPLE)
- count ++;
- }
- perf_mmap__read_done(&md->core);
- }
-
- if (count != expect * evlist->core.nr_entries) {
- pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect * evlist->core.nr_entries, count);
- goto out_delete_evlist;
- }
-
- ret = TEST_OK;
-
-out_delete_evlist:
- evlist__delete(evlist);
- return ret;
-}
-
-static struct bpf_object *
-prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
-{
- struct bpf_object *obj;
-
- obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
- if (IS_ERR(obj)) {
- pr_debug("Compile BPF program failed.\n");
- return NULL;
- }
- return obj;
-}
-
-static int __test__bpf(int idx)
-{
- int ret;
- void *obj_buf;
- size_t obj_buf_sz;
- struct bpf_object *obj;
-
- ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
- bpf_testcase_table[idx].prog_id,
- false, NULL);
- if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
- pr_debug("Unable to get BPF object, %s\n",
- bpf_testcase_table[idx].msg_compile_fail);
- if ((idx == 0) || (ret == TEST_SKIP))
- return TEST_SKIP;
- else
- return TEST_FAIL;
- }
-
- obj = prepare_bpf(obj_buf, obj_buf_sz,
- bpf_testcase_table[idx].name);
- if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
- if (!obj)
- pr_debug("Fail to load BPF object: %s\n",
- bpf_testcase_table[idx].msg_load_fail);
- else
- pr_debug("Success unexpectedly: %s\n",
- bpf_testcase_table[idx].msg_load_fail);
- ret = TEST_FAIL;
- goto out;
- }
-
- if (obj) {
- ret = do_test(obj,
- bpf_testcase_table[idx].target_func,
- bpf_testcase_table[idx].expect_result);
- if (ret != TEST_OK)
- goto out;
- if (bpf_testcase_table[idx].pin) {
- int err;
-
- if (!bpf_fs__mount()) {
- pr_debug("BPF filesystem not mounted\n");
- ret = TEST_FAIL;
- goto out;
- }
- err = mkdir(PERF_TEST_BPF_PATH, 0777);
- if (err && errno != EEXIST) {
- pr_debug("Failed to make perf_test dir: %s\n",
- strerror(errno));
- ret = TEST_FAIL;
- goto out;
- }
- if (bpf_object__pin(obj, PERF_TEST_BPF_PATH))
- ret = TEST_FAIL;
- if (rm_rf(PERF_TEST_BPF_PATH))
- ret = TEST_FAIL;
- }
- }
-
-out:
- free(obj_buf);
- bpf__clear();
- return ret;
-}
-
-static int check_env(void)
-{
- LIBBPF_OPTS(bpf_prog_load_opts, opts);
- int err;
- char license[] = "GPL";
-
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- };
-
- err = fetch_kernel_version(&opts.kern_version, NULL, 0);
- if (err) {
- pr_debug("Unable to get kernel version\n");
- return err;
- }
- err = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, license, insns,
- ARRAY_SIZE(insns), &opts);
- if (err < 0) {
- pr_err("Missing basic BPF support, skip this test: %s\n",
- strerror(errno));
- return err;
- }
- close(err);
-
- return 0;
-}
-
-static int test__bpf(int i)
-{
- int err;
-
- if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
- return TEST_FAIL;
-
- if (geteuid() != 0) {
- pr_debug("Only root can run BPF test\n");
- return TEST_SKIP;
- }
-
- if (check_env())
- return TEST_SKIP;
-
- err = __test__bpf(i);
- return err;
-}
-#endif
-
-static int test__basic_bpf_test(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
-#if defined(HAVE_LIBBPF_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
- return test__bpf(0);
-#else
- pr_debug("Skip BPF test because BPF or libtraceevent support is not compiled\n");
- return TEST_SKIP;
-#endif
-}
-
-static int test__bpf_pinning(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
-#if defined(HAVE_LIBBPF_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
- return test__bpf(1);
-#else
- pr_debug("Skip BPF test because BPF or libtraceevent support is not compiled\n");
- return TEST_SKIP;
-#endif
-}
-
-static int test__bpf_prologue_test(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
-#if defined(HAVE_LIBBPF_SUPPORT) && defined(HAVE_BPF_PROLOGUE) && defined(HAVE_LIBTRACEEVENT)
- return test__bpf(2);
-#else
- pr_debug("Skip BPF test because BPF or libtraceevent support is not compiled\n");
- return TEST_SKIP;
-#endif
-}
-
-
-static struct test_case bpf_tests[] = {
-#if defined(HAVE_LIBBPF_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
- TEST_CASE("Basic BPF filtering", basic_bpf_test),
- TEST_CASE_REASON("BPF pinning", bpf_pinning,
- "clang isn't installed or environment missing BPF support"),
-#ifdef HAVE_BPF_PROLOGUE
- TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test,
- "clang/debuginfo isn't installed or environment missing BPF support"),
-#else
- TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"),
-#endif
-#else
- TEST_CASE_REASON("Basic BPF filtering", basic_bpf_test, "not compiled in or missing libtraceevent support"),
- TEST_CASE_REASON("BPF pinning", bpf_pinning, "not compiled in or missing libtraceevent support"),
- TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in or missing libtraceevent support"),
-#endif
- { .name = NULL, }
-};
-
-struct test_suite suite__bpf = {
- .desc = "BPF filter",
- .test_cases = bpf_tests,
-};
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 1f6557ce3b0a..0ad18cf6dd22 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -33,9 +33,18 @@
static bool dont_fork;
const char *dso_to_test;
-struct test_suite *__weak arch_tests[] = {
+/*
+ * List of architecture specific tests. Not a weak symbol as the array length is
+ * dependent on the initialization, as such GCC with LTO complains of
+ * conflicting definitions with a weak symbol.
+ */
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
+extern struct test_suite *arch_tests[];
+#else
+static struct test_suite *arch_tests[] = {
NULL,
};
+#endif
static struct test_suite *generic_tests[] = {
&suite__vmlinux_matches_kallsyms,
@@ -83,9 +92,7 @@ static struct test_suite *generic_tests[] = {
&suite__fdarray__add,
&suite__kmod_path__parse,
&suite__thread_map,
- &suite__llvm,
&suite__session_topology,
- &suite__bpf,
&suite__thread_map_synthesize,
&suite__thread_map_remove,
&suite__cpu_map,
@@ -99,7 +106,6 @@ static struct test_suite *generic_tests[] = {
&suite__is_printable_array,
&suite__bitmap_print,
&suite__perf_hooks,
- &suite__clang,
&suite__unit_number__scnprint,
&suite__mem2node,
&suite__time_utils,
diff --git a/tools/perf/tests/clang.c b/tools/perf/tests/clang.c
deleted file mode 100644
index a7111005d5b9..000000000000
--- a/tools/perf/tests/clang.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "tests.h"
-#include "c++/clang-c.h"
-#include <linux/kernel.h>
-
-#ifndef HAVE_LIBCLANGLLVM_SUPPORT
-static int test__clang_to_IR(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
- return TEST_SKIP;
-}
-
-static int test__clang_to_obj(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
- return TEST_SKIP;
-}
-#endif
-
-static struct test_case clang_tests[] = {
- TEST_CASE_REASON("builtin clang compile C source to IR", clang_to_IR,
- "not compiled in"),
- TEST_CASE_REASON("builtin clang compile C source to ELF object",
- clang_to_obj,
- "not compiled in"),
- { .name = NULL, }
-};
-
-struct test_suite suite__clang = {
- .desc = "builtin clang support",
- .test_cases = clang_tests,
-};
diff --git a/tools/perf/tests/config-fragments/README b/tools/perf/tests/config-fragments/README
new file mode 100644
index 000000000000..fe7de5d93674
--- /dev/null
+++ b/tools/perf/tests/config-fragments/README
@@ -0,0 +1,7 @@
+This folder is for kernel config fragments that can be merged with
+defconfig to give full test coverage of a perf test run. This is only
+an optimistic set as some features require hardware support in order to
+pass and not skip.
+
+'config' is shared across all platforms, and for arch specific files,
+the file name should match that used in the ARCH=... make option.
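For context, a minimal usage sketch of these fragments (illustrative only, not part of the patch; the kernel-tree paths, the ARCH value and the merge_config.sh helper are assumptions based on common kconfig practice):

    # Merge the perf test fragments into an arm64 defconfig before building
    # a kernel intended for running the perf test suite.
    cd linux                                   # assumed kernel source checkout
    make ARCH=arm64 defconfig
    ./scripts/kconfig/merge_config.sh -m .config \
            tools/perf/tests/config-fragments/config \
            tools/perf/tests/config-fragments/arm64
    make ARCH=arm64 olddefconfig               # resolve any remaining dependencies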
diff --git a/tools/perf/tests/config-fragments/arm64 b/tools/perf/tests/config-fragments/arm64
new file mode 100644
index 000000000000..64c4ab17cd58
--- /dev/null
+++ b/tools/perf/tests/config-fragments/arm64
@@ -0,0 +1 @@
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
diff --git a/tools/perf/tests/config-fragments/config b/tools/perf/tests/config-fragments/config
new file mode 100644
index 000000000000..c340b3195fca
--- /dev/null
+++ b/tools/perf/tests/config-fragments/config
@@ -0,0 +1,11 @@
+CONFIG_TRACEPOINTS=y
+CONFIG_STACKTRACE=y
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_FTRACE=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_BRANCH_PROFILE_NONE=y
diff --git a/tools/perf/tests/dlfilter-test.c b/tools/perf/tests/dlfilter-test.c
index 086fd2179e41..da3a9b50b1b1 100644
--- a/tools/perf/tests/dlfilter-test.c
+++ b/tools/perf/tests/dlfilter-test.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Test dlfilter C API. A perf.data file is synthesized and then processed
- * by perf script with a dlfilter named dlfilter-test-api-v0.so. Also a C file
+ * by perf script with dlfilters named dlfilter-test-api-v*.so. Also a C file
* is compiled to provide a dso to match the synthesized perf.data file.
*/
@@ -37,6 +37,8 @@
#define MAP_START 0x400000
+#define DLFILTER_TEST_NAME_MAX 128
+
struct test_data {
struct perf_tool tool;
struct machine *machine;
@@ -45,6 +47,8 @@ struct test_data {
u64 bar;
u64 ip;
u64 addr;
+ char name[DLFILTER_TEST_NAME_MAX];
+ char desc[DLFILTER_TEST_NAME_MAX];
char perf[PATH_MAX];
char perf_data_file_name[PATH_MAX];
char c_file_name[PATH_MAX];
@@ -215,7 +219,7 @@ static int write_prog(char *file_name)
return err ? -1 : 0;
}
-static int get_dlfilters_path(char *buf, size_t sz)
+static int get_dlfilters_path(const char *name, char *buf, size_t sz)
{
char perf[PATH_MAX];
char path[PATH_MAX];
@@ -224,12 +228,12 @@ static int get_dlfilters_path(char *buf, size_t sz)
perf_exe(perf, sizeof(perf));
perf_path = dirname(perf);
- snprintf(path, sizeof(path), "%s/dlfilters/dlfilter-test-api-v0.so", perf_path);
+ snprintf(path, sizeof(path), "%s/dlfilters/%s", perf_path, name);
if (access(path, R_OK)) {
exec_path = get_argv_exec_path();
if (!exec_path)
return -1;
- snprintf(path, sizeof(path), "%s/dlfilters/dlfilter-test-api-v0.so", exec_path);
+ snprintf(path, sizeof(path), "%s/dlfilters/%s", exec_path, name);
free(exec_path);
if (access(path, R_OK))
return -1;
@@ -244,9 +248,9 @@ static int check_filter_desc(struct test_data *td)
char *desc = NULL;
int ret;
- if (get_filter_desc(td->dlfilters, "dlfilter-test-api-v0.so", &desc, &long_desc) &&
+ if (get_filter_desc(td->dlfilters, td->name, &desc, &long_desc) &&
long_desc && !strcmp(long_desc, "Filter used by the 'dlfilter C API' perf test") &&
- desc && !strcmp(desc, "dlfilter to test v0 C API"))
+ desc && !strcmp(desc, td->desc))
ret = 0;
else
ret = -1;
@@ -284,7 +288,7 @@ static int get_ip_addr(struct test_data *td)
static int do_run_perf_script(struct test_data *td, int do_early)
{
return system_cmd("%s script -i %s "
- "--dlfilter %s/dlfilter-test-api-v0.so "
+ "--dlfilter %s/%s "
"--dlarg first "
"--dlarg %d "
"--dlarg %" PRIu64 " "
@@ -292,7 +296,7 @@ static int do_run_perf_script(struct test_data *td, int do_early)
"--dlarg %d "
"--dlarg last",
td->perf, td->perf_data_file_name, td->dlfilters,
- verbose, td->ip, td->addr, do_early);
+ td->name, verbose, td->ip, td->addr, do_early);
}
static int run_perf_script(struct test_data *td)
@@ -321,7 +325,7 @@ static int test__dlfilter_test(struct test_data *td)
u64 id = 99;
int err;
- if (get_dlfilters_path(td->dlfilters, PATH_MAX))
+ if (get_dlfilters_path(td->name, td->dlfilters, PATH_MAX))
return test_result("dlfilters not found", TEST_SKIP);
if (check_filter_desc(td))
@@ -399,14 +403,18 @@ static void test_data__free(struct test_data *td)
}
}
-static int test__dlfilter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+static int test__dlfilter_ver(int ver)
{
struct test_data td = {.fd = -1};
int pid = getpid();
int err;
+ pr_debug("\n-- Testing version %d API --\n", ver);
+
perf_exe(td.perf, sizeof(td.perf));
+ snprintf(td.name, sizeof(td.name), "dlfilter-test-api-v%d.so", ver);
+ snprintf(td.desc, sizeof(td.desc), "dlfilter to test v%d C API", ver);
snprintf(td.perf_data_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-perf-data", pid);
snprintf(td.c_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-prog.c", pid);
snprintf(td.prog_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-prog", pid);
@@ -416,4 +424,14 @@ static int test__dlfilter(struct test_suite *test __maybe_unused, int subtest __
return err;
}
+static int test__dlfilter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int err = test__dlfilter_ver(0);
+
+ if (err)
+ return err;
+ /* No test for version 1 */
+ return test__dlfilter_ver(2);
+}
+
DEFINE_SUITE("dlfilter C API", dlfilter);
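For context, roughly what the reworked test now drives for each API version (illustrative only; the dlfilter name and its install location are assumptions following the dlfilter-test-api-v* pattern used above):

    # List installed dlfilters with their descriptions, then run one by hand.
    perf script --list-dlfilters -v
    perf record -o perf.data -- true
    perf script -i perf.data --dlfilter dlfilter-test-api-v2.so \
            --dlarg first --dlarg last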
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index c1c3fcbc2753..81229fa4f1e9 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -70,7 +70,7 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
{
struct expr_id_data *val_ptr;
const char *p;
- double val, num_cpus, num_cores, num_dies, num_packages;
+ double val, num_cpus_online, num_cpus, num_cores, num_dies, num_packages;
int ret;
struct expr_parse_ctx *ctx;
bool is_intel = false;
@@ -227,7 +227,10 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
/* Test toplogy constants appear well ordered. */
expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("#num_cpus_online",
+ expr__parse(&num_cpus_online, ctx, "#num_cpus_online") == 0);
TEST_ASSERT_VAL("#num_cpus", expr__parse(&num_cpus, ctx, "#num_cpus") == 0);
+ TEST_ASSERT_VAL("#num_cpus >= #num_cpus_online", num_cpus >= num_cpus_online);
TEST_ASSERT_VAL("#num_cores", expr__parse(&num_cores, ctx, "#num_cores") == 0);
TEST_ASSERT_VAL("#num_cpus >= #num_cores", num_cpus >= num_cores);
TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0);
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
deleted file mode 100644
index 0bc25a56cfef..000000000000
--- a/tools/perf/tests/llvm.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "tests.h"
-#include "debug.h"
-
-#ifdef HAVE_LIBBPF_SUPPORT
-#include <bpf/libbpf.h>
-#include <util/llvm-utils.h>
-#include "llvm.h"
-static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz)
-{
- struct bpf_object *obj;
-
- obj = bpf_object__open_mem(obj_buf, obj_buf_sz, NULL);
- if (libbpf_get_error(obj))
- return TEST_FAIL;
- bpf_object__close(obj);
- return TEST_OK;
-}
-
-static struct {
- const char *source;
- const char *desc;
- bool should_load_fail;
-} bpf_source_table[__LLVM_TESTCASE_MAX] = {
- [LLVM_TESTCASE_BASE] = {
- .source = test_llvm__bpf_base_prog,
- .desc = "Basic BPF llvm compile",
- },
- [LLVM_TESTCASE_KBUILD] = {
- .source = test_llvm__bpf_test_kbuild_prog,
- .desc = "kbuild searching",
- },
- [LLVM_TESTCASE_BPF_PROLOGUE] = {
- .source = test_llvm__bpf_test_prologue_prog,
- .desc = "Compile source for BPF prologue generation",
- },
- [LLVM_TESTCASE_BPF_RELOCATION] = {
- .source = test_llvm__bpf_test_relocation,
- .desc = "Compile source for BPF relocation",
- .should_load_fail = true,
- },
-};
-
-int
-test_llvm__fetch_bpf_obj(void **p_obj_buf,
- size_t *p_obj_buf_sz,
- enum test_llvm__testcase idx,
- bool force,
- bool *should_load_fail)
-{
- const char *source;
- const char *desc;
- const char *tmpl_old, *clang_opt_old;
- char *tmpl_new = NULL, *clang_opt_new = NULL;
- int err, old_verbose, ret = TEST_FAIL;
-
- if (idx >= __LLVM_TESTCASE_MAX)
- return TEST_FAIL;
-
- source = bpf_source_table[idx].source;
- desc = bpf_source_table[idx].desc;
- if (should_load_fail)
- *should_load_fail = bpf_source_table[idx].should_load_fail;
-
- /*
- * Skip this test if user's .perfconfig doesn't set [llvm] section
- * and clang is not found in $PATH
- */
- if (!force && (!llvm_param.user_set_param &&
- llvm__search_clang())) {
- pr_debug("No clang, skip this test\n");
- return TEST_SKIP;
- }
-
- /*
- * llvm is verbosity when error. Suppress all error output if
- * not 'perf test -v'.
- */
- old_verbose = verbose;
- if (verbose == 0)
- verbose = -1;
-
- *p_obj_buf = NULL;
- *p_obj_buf_sz = 0;
-
- if (!llvm_param.clang_bpf_cmd_template)
- goto out;
-
- if (!llvm_param.clang_opt)
- llvm_param.clang_opt = strdup("");
-
- err = asprintf(&tmpl_new, "echo '%s' | %s%s", source,
- llvm_param.clang_bpf_cmd_template,
- old_verbose ? "" : " 2>/dev/null");
- if (err < 0)
- goto out;
- err = asprintf(&clang_opt_new, "-xc %s", llvm_param.clang_opt);
- if (err < 0)
- goto out;
-
- tmpl_old = llvm_param.clang_bpf_cmd_template;
- llvm_param.clang_bpf_cmd_template = tmpl_new;
- clang_opt_old = llvm_param.clang_opt;
- llvm_param.clang_opt = clang_opt_new;
-
- err = llvm__compile_bpf("-", p_obj_buf, p_obj_buf_sz);
-
- llvm_param.clang_bpf_cmd_template = tmpl_old;
- llvm_param.clang_opt = clang_opt_old;
-
- verbose = old_verbose;
- if (err)
- goto out;
-
- ret = TEST_OK;
-out:
- free(tmpl_new);
- free(clang_opt_new);
- if (ret != TEST_OK)
- pr_debug("Failed to compile test case: '%s'\n", desc);
- return ret;
-}
-
-static int test__llvm(int subtest)
-{
- int ret;
- void *obj_buf = NULL;
- size_t obj_buf_sz = 0;
- bool should_load_fail = false;
-
- if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
- return TEST_FAIL;
-
- ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
- subtest, false, &should_load_fail);
-
- if (ret == TEST_OK && !should_load_fail) {
- ret = test__bpf_parsing(obj_buf, obj_buf_sz);
- if (ret != TEST_OK) {
- pr_debug("Failed to parse test case '%s'\n",
- bpf_source_table[subtest].desc);
- }
- }
- free(obj_buf);
-
- return ret;
-}
-#endif //HAVE_LIBBPF_SUPPORT
-
-static int test__llvm__bpf_base_prog(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
-#ifdef HAVE_LIBBPF_SUPPORT
- return test__llvm(LLVM_TESTCASE_BASE);
-#else
- pr_debug("Skip LLVM test because BPF support is not compiled\n");
- return TEST_SKIP;
-#endif
-}
-
-static int test__llvm__bpf_test_kbuild_prog(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
-#ifdef HAVE_LIBBPF_SUPPORT
- return test__llvm(LLVM_TESTCASE_KBUILD);
-#else
- pr_debug("Skip LLVM test because BPF support is not compiled\n");
- return TEST_SKIP;
-#endif
-}
-
-static int test__llvm__bpf_test_prologue_prog(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
-#ifdef HAVE_LIBBPF_SUPPORT
- return test__llvm(LLVM_TESTCASE_BPF_PROLOGUE);
-#else
- pr_debug("Skip LLVM test because BPF support is not compiled\n");
- return TEST_SKIP;
-#endif
-}
-
-static int test__llvm__bpf_test_relocation(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
-#ifdef HAVE_LIBBPF_SUPPORT
- return test__llvm(LLVM_TESTCASE_BPF_RELOCATION);
-#else
- pr_debug("Skip LLVM test because BPF support is not compiled\n");
- return TEST_SKIP;
-#endif
-}
-
-
-static struct test_case llvm_tests[] = {
-#ifdef HAVE_LIBBPF_SUPPORT
- TEST_CASE("Basic BPF llvm compile", llvm__bpf_base_prog),
- TEST_CASE("kbuild searching", llvm__bpf_test_kbuild_prog),
- TEST_CASE("Compile source for BPF prologue generation",
- llvm__bpf_test_prologue_prog),
- TEST_CASE("Compile source for BPF relocation", llvm__bpf_test_relocation),
-#else
- TEST_CASE_REASON("Basic BPF llvm compile", llvm__bpf_base_prog, "not compiled in"),
- TEST_CASE_REASON("kbuild searching", llvm__bpf_test_kbuild_prog, "not compiled in"),
- TEST_CASE_REASON("Compile source for BPF prologue generation",
- llvm__bpf_test_prologue_prog, "not compiled in"),
- TEST_CASE_REASON("Compile source for BPF relocation",
- llvm__bpf_test_relocation, "not compiled in"),
-#endif
- { .name = NULL, }
-};
-
-struct test_suite suite__llvm = {
- .desc = "LLVM search and compile",
- .test_cases = llvm_tests,
-};
diff --git a/tools/perf/tests/llvm.h b/tools/perf/tests/llvm.h
deleted file mode 100644
index f68b0d9b8ae2..000000000000
--- a/tools/perf/tests/llvm.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PERF_TEST_LLVM_H
-#define PERF_TEST_LLVM_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h> /* for size_t */
-#include <stdbool.h> /* for bool */
-
-extern const char test_llvm__bpf_base_prog[];
-extern const char test_llvm__bpf_test_kbuild_prog[];
-extern const char test_llvm__bpf_test_prologue_prog[];
-extern const char test_llvm__bpf_test_relocation[];
-
-enum test_llvm__testcase {
- LLVM_TESTCASE_BASE,
- LLVM_TESTCASE_KBUILD,
- LLVM_TESTCASE_BPF_PROLOGUE,
- LLVM_TESTCASE_BPF_RELOCATION,
- __LLVM_TESTCASE_MAX,
-};
-
-int test_llvm__fetch_bpf_obj(void **p_obj_buf, size_t *p_obj_buf_sz,
- enum test_llvm__testcase index, bool force,
- bool *should_load_fail);
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 58cf96d762d0..ea4c341f5af1 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -95,7 +95,6 @@ make_with_babeltrace:= LIBBABELTRACE=1
make_with_coresight := CORESIGHT=1
make_no_sdt := NO_SDT=1
make_no_syscall_tbl := NO_SYSCALL_TABLE=1
-make_with_clangllvm := LIBCLANGLLVM=1
make_no_libpfm4 := NO_LIBPFM4=1
make_with_gtk2 := GTK2=1
make_refcnt_check := EXTRA_CFLAGS="-DREFCNT_CHECKING=1"
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 658fb9599d95..d47f1f871164 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -2170,7 +2170,7 @@ static const struct evlist_test test__events[] = {
static const struct evlist_test test__events_pmu[] = {
{
- .name = "cpu/config=10,config1,config2=3,period=1000/u",
+ .name = "cpu/config=10,config1=1,config2=3,period=1000/u",
.valid = test__pmu_cpu_valid,
.check = test__checkevent_pmu,
/* 0 */
@@ -2472,7 +2472,7 @@ static int test_term(const struct terms_test *t)
INIT_LIST_HEAD(&terms);
- ret = parse_events_terms(&terms, t->str);
+ ret = parse_events_terms(&terms, t->str, /*input=*/ NULL);
if (ret) {
pr_debug("failed to parse terms '%s', err %d\n",
t->str , ret);
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index 64383fc34ef1..f5321fbdee79 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -44,6 +44,7 @@ struct perf_pmu_test_pmu {
static const struct perf_pmu_test_event bp_l1_btb_correct = {
.event = {
+ .pmu = "default_core",
.name = "bp_l1_btb_correct",
.event = "event=0x8a",
.desc = "L1 BTB Correction",
@@ -55,6 +56,7 @@ static const struct perf_pmu_test_event bp_l1_btb_correct = {
static const struct perf_pmu_test_event bp_l2_btb_correct = {
.event = {
+ .pmu = "default_core",
.name = "bp_l2_btb_correct",
.event = "event=0x8b",
.desc = "L2 BTB Correction",
@@ -66,6 +68,7 @@ static const struct perf_pmu_test_event bp_l2_btb_correct = {
static const struct perf_pmu_test_event segment_reg_loads_any = {
.event = {
+ .pmu = "default_core",
.name = "segment_reg_loads.any",
.event = "event=0x6,period=200000,umask=0x80",
.desc = "Number of segment register loads",
@@ -77,6 +80,7 @@ static const struct perf_pmu_test_event segment_reg_loads_any = {
static const struct perf_pmu_test_event dispatch_blocked_any = {
.event = {
+ .pmu = "default_core",
.name = "dispatch_blocked.any",
.event = "event=0x9,period=200000,umask=0x20",
.desc = "Memory cluster signals to block micro-op dispatch for any reason",
@@ -88,6 +92,7 @@ static const struct perf_pmu_test_event dispatch_blocked_any = {
static const struct perf_pmu_test_event eist_trans = {
.event = {
+ .pmu = "default_core",
.name = "eist_trans",
.event = "event=0x3a,period=200000,umask=0x0",
.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
@@ -99,6 +104,7 @@ static const struct perf_pmu_test_event eist_trans = {
static const struct perf_pmu_test_event l3_cache_rd = {
.event = {
+ .pmu = "default_core",
.name = "l3_cache_rd",
.event = "event=0x40",
.desc = "L3 cache access, read",
@@ -123,7 +129,7 @@ static const struct perf_pmu_test_event uncore_hisi_ddrc_flux_wcmd = {
.event = {
.name = "uncore_hisi_ddrc.flux_wcmd",
.event = "event=0x2",
- .desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
+ .desc = "DDRC write commands",
.topic = "uncore",
.long_desc = "DDRC write commands",
.pmu = "hisi_sccl,ddrc",
@@ -137,7 +143,7 @@ static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = {
.event = {
.name = "unc_cbo_xsnp_response.miss_eviction",
.event = "event=0x22,umask=0x81",
- .desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core. Unit: uncore_cbox ",
+ .desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.topic = "uncore",
.long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.pmu = "uncore_cbox",
@@ -151,7 +157,7 @@ static const struct perf_pmu_test_event uncore_hyphen = {
.event = {
.name = "event-hyphen",
.event = "event=0xe0,umask=0x00",
- .desc = "UNC_CBO_HYPHEN. Unit: uncore_cbox ",
+ .desc = "UNC_CBO_HYPHEN",
.topic = "uncore",
.long_desc = "UNC_CBO_HYPHEN",
.pmu = "uncore_cbox",
@@ -165,7 +171,7 @@ static const struct perf_pmu_test_event uncore_two_hyph = {
.event = {
.name = "event-two-hyph",
.event = "event=0xc0,umask=0x00",
- .desc = "UNC_CBO_TWO_HYPH. Unit: uncore_cbox ",
+ .desc = "UNC_CBO_TWO_HYPH",
.topic = "uncore",
.long_desc = "UNC_CBO_TWO_HYPH",
.pmu = "uncore_cbox",
@@ -179,7 +185,7 @@ static const struct perf_pmu_test_event uncore_hisi_l3c_rd_hit_cpipe = {
.event = {
.name = "uncore_hisi_l3c.rd_hit_cpipe",
.event = "event=0x7",
- .desc = "Total read hits. Unit: hisi_sccl,l3c ",
+ .desc = "Total read hits",
.topic = "uncore",
.long_desc = "Total read hits",
.pmu = "hisi_sccl,l3c",
@@ -193,7 +199,7 @@ static const struct perf_pmu_test_event uncore_imc_free_running_cache_miss = {
.event = {
.name = "uncore_imc_free_running.cache_miss",
.event = "event=0x12",
- .desc = "Total cache misses. Unit: uncore_imc_free_running ",
+ .desc = "Total cache misses",
.topic = "uncore",
.long_desc = "Total cache misses",
.pmu = "uncore_imc_free_running",
@@ -207,7 +213,7 @@ static const struct perf_pmu_test_event uncore_imc_cache_hits = {
.event = {
.name = "uncore_imc.cache_hits",
.event = "event=0x34",
- .desc = "Total cache hits. Unit: uncore_imc ",
+ .desc = "Total cache hits",
.topic = "uncore",
.long_desc = "Total cache hits",
.pmu = "uncore_imc",
@@ -232,13 +238,13 @@ static const struct perf_pmu_test_event sys_ddr_pmu_write_cycles = {
.event = {
.name = "sys_ddr_pmu.write_cycles",
.event = "event=0x2b",
- .desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
+ .desc = "ddr write-cycles event",
.topic = "uncore",
.pmu = "uncore_sys_ddr_pmu",
.compat = "v8",
},
.alias_str = "event=0x2b",
- .alias_long_desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
+ .alias_long_desc = "ddr write-cycles event",
.matching_pmu = "uncore_sys_ddr_pmu",
};
@@ -246,13 +252,13 @@ static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = {
.event = {
.name = "sys_ccn_pmu.read_cycles",
.event = "config=0x2c",
- .desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
+ .desc = "ccn read-cycles event",
.topic = "uncore",
.pmu = "uncore_sys_ccn_pmu",
.compat = "0x01",
},
.alias_str = "config=0x2c",
- .alias_long_desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
+ .alias_long_desc = "ccn read-cycles event",
.matching_pmu = "uncore_sys_ccn_pmu",
};
@@ -341,7 +347,7 @@ static int compare_pmu_events(const struct pmu_event *e1, const struct pmu_event
return 0;
}
-static int compare_alias_to_test_event(struct perf_pmu_alias *alias,
+static int compare_alias_to_test_event(struct pmu_event_info *alias,
struct perf_pmu_test_event const *test_event,
char const *pmu_name)
{
@@ -385,8 +391,8 @@ static int compare_alias_to_test_event(struct perf_pmu_alias *alias,
return -1;
}
-
- if (!is_same(alias->pmu_name, test_event->event.pmu)) {
+ if (!is_same(alias->pmu_name, test_event->event.pmu) &&
+ !is_same(alias->pmu_name, "default_core")) {
pr_debug("testing aliases PMU %s: mismatched pmu_name, %s vs %s\n",
pmu_name, alias->pmu_name, test_event->event.pmu);
return -1;
@@ -403,7 +409,7 @@ static int test__pmu_event_table_core_callback(const struct pmu_event *pe,
struct perf_pmu_test_event const **test_event_table;
bool found = false;
- if (pe->pmu)
+ if (strcmp(pe->pmu, "default_core"))
test_event_table = &uncore_events[0];
else
test_event_table = &core_events[0];
@@ -477,12 +483,14 @@ static int test__pmu_event_table(struct test_suite *test __maybe_unused,
if (!table || !sys_event_table)
return -1;
- err = pmu_events_table_for_each_event(table, test__pmu_event_table_core_callback,
+ err = pmu_events_table__for_each_event(table, /*pmu=*/ NULL,
+ test__pmu_event_table_core_callback,
&map_events);
if (err)
return err;
- err = pmu_events_table_for_each_event(sys_event_table, test__pmu_event_table_sys_callback,
+ err = pmu_events_table__for_each_event(sys_event_table, /*pmu=*/ NULL,
+ test__pmu_event_table_sys_callback,
&map_events);
if (err)
return err;
@@ -496,26 +504,30 @@ static int test__pmu_event_table(struct test_suite *test __maybe_unused,
return 0;
}
-static struct perf_pmu_alias *find_alias(const char *test_event, struct list_head *aliases)
-{
- struct perf_pmu_alias *alias;
+struct test_core_pmu_event_aliases_cb_args {
+ struct perf_pmu_test_event const *test_event;
+ int *count;
+};
- list_for_each_entry(alias, aliases, list)
- if (!strcmp(test_event, alias->name))
- return alias;
+static int test_core_pmu_event_aliases_cb(void *state, struct pmu_event_info *alias)
+{
+ struct test_core_pmu_event_aliases_cb_args *args = state;
- return NULL;
+ if (compare_alias_to_test_event(alias, args->test_event, alias->pmu->name))
+ return -1;
+ (*args->count)++;
+ pr_debug2("testing aliases core PMU %s: matched event %s\n",
+ alias->pmu_name, alias->name);
+ return 0;
}
/* Verify aliases are as expected */
-static int __test_core_pmu_event_aliases(char *pmu_name, int *count)
+static int __test_core_pmu_event_aliases(const char *pmu_name, int *count)
{
struct perf_pmu_test_event const **test_event_table;
struct perf_pmu *pmu;
- LIST_HEAD(aliases);
int res = 0;
const struct pmu_events_table *table = find_core_events_table("testarch", "testcpu");
- struct perf_pmu_alias *a, *tmp;
if (!table)
return -1;
@@ -526,37 +538,40 @@ static int __test_core_pmu_event_aliases(char *pmu_name, int *count)
if (!pmu)
return -1;
- pmu->name = pmu_name;
-
- pmu_add_cpu_aliases_table(&aliases, pmu, table);
-
+ INIT_LIST_HEAD(&pmu->format);
+ INIT_LIST_HEAD(&pmu->aliases);
+ INIT_LIST_HEAD(&pmu->caps);
+ INIT_LIST_HEAD(&pmu->list);
+ pmu->name = strdup(pmu_name);
+ pmu->is_core = true;
+
+ pmu->events_table = table;
+ pmu_add_cpu_aliases_table(pmu, table);
+ pmu->cpu_aliases_added = true;
+ pmu->sysfs_aliases_loaded = true;
+
+ res = pmu_events_table__find_event(table, pmu, "bp_l1_btb_correct", NULL, NULL);
+ if (res != 0) {
+ pr_debug("Missing test event in test architecture");
+ return res;
+ }
for (; *test_event_table; test_event_table++) {
- struct perf_pmu_test_event const *test_event = *test_event_table;
- struct pmu_event const *event = &test_event->event;
- struct perf_pmu_alias *alias = find_alias(event->name, &aliases);
-
- if (!alias) {
- pr_debug("testing aliases core PMU %s: no alias, alias_table->name=%s\n",
- pmu_name, event->name);
- res = -1;
- break;
- }
-
- if (compare_alias_to_test_event(alias, test_event, pmu_name)) {
- res = -1;
- break;
- }
-
- (*count)++;
- pr_debug2("testing aliases core PMU %s: matched event %s\n",
- pmu_name, alias->name);
+ struct perf_pmu_test_event test_event = **test_event_table;
+ struct pmu_event const *event = &test_event.event;
+ struct test_core_pmu_event_aliases_cb_args args = {
+ .test_event = &test_event,
+ .count = count,
+ };
+ int err;
+
+ test_event.event.pmu = pmu_name;
+ err = perf_pmu__find_event(pmu, event->name, &args,
+ test_core_pmu_event_aliases_cb);
+ if (err)
+ res = err;
}
+ perf_pmu__delete(pmu);
- list_for_each_entry_safe(a, tmp, &aliases, list) {
- list_del(&a->list);
- perf_pmu_free_alias(a);
- }
- free(pmu);
return res;
}
@@ -566,20 +581,20 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
struct perf_pmu_test_event const **table;
struct perf_pmu *pmu = &test_pmu->pmu;
const char *pmu_name = pmu->name;
- struct perf_pmu_alias *a, *tmp, *alias;
const struct pmu_events_table *events_table;
- LIST_HEAD(aliases);
int res = 0;
events_table = find_core_events_table("testarch", "testcpu");
if (!events_table)
return -1;
- pmu_add_cpu_aliases_table(&aliases, pmu, events_table);
- pmu_add_sys_aliases(&aliases, pmu);
+ pmu->events_table = events_table;
+ pmu_add_cpu_aliases_table(pmu, events_table);
+ pmu->cpu_aliases_added = true;
+ pmu->sysfs_aliases_loaded = true;
+ pmu_add_sys_aliases(pmu);
/* Count how many aliases we generated */
- list_for_each_entry(alias, &aliases, list)
- alias_count++;
+ alias_count = perf_pmu__num_events(pmu);
/* Count how many aliases we expect from the known table */
for (table = &test_pmu->aliases[0]; *table; table++)
@@ -588,33 +603,25 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
if (alias_count != to_match_count) {
pr_debug("testing aliases uncore PMU %s: mismatch expected aliases (%d) vs found (%d)\n",
pmu_name, to_match_count, alias_count);
- res = -1;
- goto out;
+ return -1;
}
- list_for_each_entry(alias, &aliases, list) {
- bool matched = false;
-
- for (table = &test_pmu->aliases[0]; *table; table++) {
- struct perf_pmu_test_event const *test_event = *table;
- struct pmu_event const *event = &test_event->event;
-
- if (!strcmp(event->name, alias->name)) {
- if (compare_alias_to_test_event(alias,
- test_event,
- pmu_name)) {
- continue;
- }
- matched = true;
- matched_count++;
- }
- }
-
- if (matched == false) {
+ for (table = &test_pmu->aliases[0]; *table; table++) {
+ struct perf_pmu_test_event test_event = **table;
+ struct pmu_event const *event = &test_event.event;
+ int err;
+ struct test_core_pmu_event_aliases_cb_args args = {
+ .test_event = &test_event,
+ .count = &matched_count,
+ };
+
+ err = perf_pmu__find_event(pmu, event->name, &args,
+ test_core_pmu_event_aliases_cb);
+ if (err) {
+ res = err;
pr_debug("testing aliases uncore PMU %s: could not match alias %s\n",
- pmu_name, alias->name);
- res = -1;
- goto out;
+ pmu_name, event->name);
+ return -1;
}
}
@@ -623,19 +630,13 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
pmu_name, matched_count, alias_count);
res = -1;
}
-
-out:
- list_for_each_entry_safe(a, tmp, &aliases, list) {
- list_del(&a->list);
- perf_pmu_free_alias(a);
- }
return res;
}
static struct perf_pmu_test_pmu test_pmus[] = {
{
.pmu = {
- .name = (char *)"hisi_sccl1_ddrc2",
+ .name = "hisi_sccl1_ddrc2",
.is_uncore = 1,
},
.aliases = {
@@ -644,7 +645,7 @@ static struct perf_pmu_test_pmu test_pmus[] = {
},
{
.pmu = {
- .name = (char *)"uncore_cbox_0",
+ .name = "uncore_cbox_0",
.is_uncore = 1,
},
.aliases = {
@@ -655,7 +656,7 @@ static struct perf_pmu_test_pmu test_pmus[] = {
},
{
.pmu = {
- .name = (char *)"hisi_sccl3_l3c7",
+ .name = "hisi_sccl3_l3c7",
.is_uncore = 1,
},
.aliases = {
@@ -664,7 +665,7 @@ static struct perf_pmu_test_pmu test_pmus[] = {
},
{
.pmu = {
- .name = (char *)"uncore_imc_free_running_0",
+ .name = "uncore_imc_free_running_0",
.is_uncore = 1,
},
.aliases = {
@@ -673,7 +674,7 @@ static struct perf_pmu_test_pmu test_pmus[] = {
},
{
.pmu = {
- .name = (char *)"uncore_imc_0",
+ .name = "uncore_imc_0",
.is_uncore = 1,
},
.aliases = {
@@ -682,9 +683,9 @@ static struct perf_pmu_test_pmu test_pmus[] = {
},
{
.pmu = {
- .name = (char *)"uncore_sys_ddr_pmu0",
+ .name = "uncore_sys_ddr_pmu0",
.is_uncore = 1,
- .id = (char *)"v8",
+ .id = "v8",
},
.aliases = {
&sys_ddr_pmu_write_cycles,
@@ -692,9 +693,9 @@ static struct perf_pmu_test_pmu test_pmus[] = {
},
{
.pmu = {
- .name = (char *)"uncore_sys_ccn_pmu4",
+ .name = "uncore_sys_ccn_pmu4",
.is_uncore = 1,
- .id = (char *)"0x01",
+ .id = "0x01",
},
.aliases = {
&sys_ccn_pmu_read_cycles,
@@ -732,8 +733,13 @@ static int test__aliases(struct test_suite *test __maybe_unused,
}
for (i = 0; i < ARRAY_SIZE(test_pmus); i++) {
- int res = __test_uncore_pmu_event_aliases(&test_pmus[i]);
+ int res;
+
+ INIT_LIST_HEAD(&test_pmus[i].pmu.format);
+ INIT_LIST_HEAD(&test_pmus[i].pmu.aliases);
+ INIT_LIST_HEAD(&test_pmus[i].pmu.caps);
+ res = __test_uncore_pmu_event_aliases(&test_pmus[i]);
if (res)
return res;
}
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index a4452639a3d4..eb60e5f66859 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -7,6 +7,7 @@
#include <stdio.h>
#include <linux/kernel.h>
#include <linux/limits.h>
+#include <linux/zalloc.h>
/* Simulated format definitions. */
static struct test_format {
@@ -27,55 +28,55 @@ static struct test_format {
/* Simulated users input. */
static struct parse_events_term test_terms[] = {
{
- .config = (char *) "krava01",
+ .config = "krava01",
.val.num = 15,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
- .config = (char *) "krava02",
+ .config = "krava02",
.val.num = 170,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
- .config = (char *) "krava03",
+ .config = "krava03",
.val.num = 1,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
- .config = (char *) "krava11",
+ .config = "krava11",
.val.num = 27,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
- .config = (char *) "krava12",
+ .config = "krava12",
.val.num = 1,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
- .config = (char *) "krava13",
+ .config = "krava13",
.val.num = 2,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
- .config = (char *) "krava21",
+ .config = "krava21",
.val.num = 119,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
- .config = (char *) "krava22",
+ .config = "krava22",
.val.num = 11,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
},
{
- .config = (char *) "krava23",
+ .config = "krava23",
.val.num = 2,
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
@@ -141,48 +142,55 @@ static struct list_head *test_terms_list(void)
static int test__pmu(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char dir[PATH_MAX];
- char *format = test_format_dir_get(dir, sizeof(dir));
- LIST_HEAD(formats);
+ char *format;
struct list_head *terms = test_terms_list();
+ struct perf_event_attr attr;
+ struct perf_pmu *pmu;
+ int fd;
int ret;
- if (!format)
- return -EINVAL;
-
- do {
- struct perf_event_attr attr;
- int fd;
-
- memset(&attr, 0, sizeof(attr));
-
- fd = open(format, O_DIRECTORY);
- if (fd < 0) {
- ret = fd;
- break;
- }
- ret = perf_pmu__format_parse(fd, &formats);
- if (ret)
- break;
-
- ret = perf_pmu__config_terms("perf-pmu-test", &formats, &attr,
- terms, false, NULL);
- if (ret)
- break;
+ pmu = zalloc(sizeof(*pmu));
+ if (!pmu)
+ return -ENOMEM;
- ret = -EINVAL;
+ INIT_LIST_HEAD(&pmu->format);
+ INIT_LIST_HEAD(&pmu->aliases);
+ INIT_LIST_HEAD(&pmu->caps);
+ format = test_format_dir_get(dir, sizeof(dir));
+ if (!format) {
+ free(pmu);
+ return -EINVAL;
+ }
- if (attr.config != 0xc00000000002a823)
- break;
- if (attr.config1 != 0x8000400000000145)
- break;
- if (attr.config2 != 0x0400000020041d07)
- break;
+ memset(&attr, 0, sizeof(attr));
- ret = 0;
- } while (0);
+ fd = open(format, O_DIRECTORY);
+ if (fd < 0) {
+ ret = fd;
+ goto out;
+ }
- perf_pmu__del_formats(&formats);
+ pmu->name = strdup("perf-pmu-test");
+ ret = perf_pmu__format_parse(pmu, fd, /*eager_load=*/true);
+ if (ret)
+ goto out;
+
+ ret = perf_pmu__config_terms(pmu, &attr, terms, /*zero=*/false, /*err=*/NULL);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ if (attr.config != 0xc00000000002a823)
+ goto out;
+ if (attr.config1 != 0x8000400000000145)
+ goto out;
+ if (attr.config2 != 0x0400000020041d07)
+ goto out;
+
+ ret = 0;
+out:
test_format_dir_put(format);
+ perf_pmu__delete(pmu);
return ret;
}
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop.sh b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
index 569e9d46162b..779bc8608e1e 100755
--- a/tools/perf/tests/shell/coresight/asm_pure_loop.sh
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
@@ -5,7 +5,7 @@
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
TEST="asm_pure_loop"
-. $(dirname $0)/../lib/coresight.sh
+. "$(dirname $0)"/../lib/coresight.sh
ARGS=""
DATV="out"
DATA="$DATD/perf-$TEST-$DATV.data"
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
index d21ba8545938..08a44e52ce9b 100755
--- a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
+++ b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
@@ -5,7 +5,7 @@
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
TEST="memcpy_thread"
-. $(dirname $0)/../lib/coresight.sh
+. "$(dirname $0)"/../lib/coresight.sh
ARGS="16 10 1"
DATV="16k_10"
DATA="$DATD/perf-$TEST-$DATV.data"
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
index 7c13636fc778..c83a200dede4 100755
--- a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
@@ -5,7 +5,7 @@
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
TEST="thread_loop"
-. $(dirname $0)/../lib/coresight.sh
+. "$(dirname $0)"/../lib/coresight.sh
ARGS="10 1"
DATV="check-tid-10th"
DATA="$DATD/perf-$TEST-$DATV.data"
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
index a067145af43c..6346fd5e87c8 100755
--- a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
@@ -5,7 +5,7 @@
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
TEST="thread_loop"
-. $(dirname $0)/../lib/coresight.sh
+. "$(dirname $0)"/../lib/coresight.sh
ARGS="2 20"
DATV="check-tid-2th"
DATA="$DATD/perf-$TEST-$DATV.data"
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
index f48c85230b15..7304e3d3a6ff 100755
--- a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
@@ -5,7 +5,7 @@
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
TEST="unroll_loop_thread"
-. $(dirname $0)/../lib/coresight.sh
+. "$(dirname $0)"/../lib/coresight.sh
ARGS="10"
DATV="10"
DATA="$DATD/perf-$TEST-$DATV.data"
diff --git a/tools/perf/tests/shell/lib/probe.sh b/tools/perf/tests/shell/lib/probe.sh
index 51e3f60baba0..5aa6e2ec5734 100644
--- a/tools/perf/tests/shell/lib/probe.sh
+++ b/tools/perf/tests/shell/lib/probe.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 60c5e34f90c4..bf4c1fb71c4b 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
perf probe -l 2>&1 | grep -q probe:vfs_getname
@@ -10,11 +11,11 @@ cleanup_probe_vfs_getname() {
}
add_probe_vfs_getname() {
- local verbose=$1
+ add_probe_verbose=$1
if [ $had_vfs_getname -eq 1 ] ; then
line=$(perf probe -L getname_flags 2>&1 | grep -E 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+ perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
fi
}
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index 698343f0ecf9..3cc158a64326 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Return true if perf_event_paranoid is > $1 and not running as root.
diff --git a/tools/perf/tests/shell/lib/waiting.sh b/tools/perf/tests/shell/lib/waiting.sh
index e7a39134a68e..bdd5a7c71591 100644
--- a/tools/perf/tests/shell/lib/waiting.sh
+++ b/tools/perf/tests/shell/lib/waiting.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
tenths=date\ +%s%1N
diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh
index 4a194420416e..d120e83db7d9 100755
--- a/tools/perf/tests/shell/lock_contention.sh
+++ b/tools/perf/tests/shell/lock_contention.sh
@@ -21,7 +21,7 @@ trap_cleanup() {
trap trap_cleanup EXIT TERM INT
check() {
- if [ `id -u` != 0 ]; then
+ if [ "$(id -u)" != 0 ]; then
echo "[Skip] No root permission"
err=2
exit
@@ -157,10 +157,10 @@ test_lock_filter()
perf lock contention -i ${perfdata} -L tasklist_lock -q 2> ${result}
# find out the type of tasklist_lock
- local type=$(head -1 "${result}" | awk '{ print $8 }' | sed -e 's/:.*//')
+ test_lock_filter_type=$(head -1 "${result}" | awk '{ print $8 }' | sed -e 's/:.*//')
- if [ "$(grep -c -v "${type}" "${result}")" != "0" ]; then
- echo "[Fail] Recorded result should not have non-${type} locks:" "$(cat "${result}")"
+ if [ "$(grep -c -v "${test_lock_filter_type}" "${result}")" != "0" ]; then
+ echo "[Fail] Recorded result should not have non-${test_lock_filter_type} locks:" "$(cat "${result}")"
err=1
exit
fi
@@ -170,8 +170,8 @@ test_lock_filter()
fi
perf lock con -a -b -L tasklist_lock -q -- perf bench sched messaging > /dev/null 2> ${result}
- if [ "$(grep -c -v "${type}" "${result}")" != "0" ]; then
- echo "[Fail] BPF result should not have non-${type} locks:" "$(cat "${result}")"
+ if [ "$(grep -c -v "${test_lock_filter_type}" "${result}")" != "0" ]; then
+ echo "[Fail] BPF result should not have non-${test_lock_filter_type} locks:" "$(cat "${result}")"
err=1
exit
fi
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
index 5d1b63d3f3e1..871243d6d03a 100755
--- a/tools/perf/tests/shell/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -4,11 +4,11 @@
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
-. $(dirname $0)/lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_probe || exit 2
-. $(dirname $0)/lib/probe_vfs_getname.sh
+. "$(dirname $0)"/lib/probe_vfs_getname.sh
add_probe_vfs_getname || skip_if_no_debuginfo
err=$?
diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
index 49bd875d5122..8929046e9057 100755
--- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh
+++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
@@ -13,25 +13,25 @@ skip_if_no_z_record() {
collect_z_record() {
echo "Collecting compressed record file:"
[ "$(uname -m)" != s390x ] && gflag='-g'
- $perf_tool record -o $trace_file $gflag -z -F 5000 -- \
+ $perf_tool record -o "$trace_file" $gflag -z -F 5000 -- \
dd count=500 if=/dev/urandom of=/dev/null
}
check_compressed_stats() {
echo "Checking compressed events stats:"
- $perf_tool report -i $trace_file --header --stats | \
+ $perf_tool report -i "$trace_file" --header --stats | \
grep -E "(# compressed : Zstd,)|(COMPRESSED events:)"
}
check_compressed_output() {
- $perf_tool inject -i $trace_file -o $trace_file.decomp &&
- $perf_tool report -i $trace_file --stdio -F comm,dso,sym | head -n -3 > $trace_file.comp.output &&
- $perf_tool report -i $trace_file.decomp --stdio -F comm,dso,sym | head -n -3 > $trace_file.decomp.output &&
- diff $trace_file.comp.output $trace_file.decomp.output
+ $perf_tool inject -i "$trace_file" -o "$trace_file.decomp" &&
+ $perf_tool report -i "$trace_file" --stdio -F comm,dso,sym | head -n -3 > "$trace_file.comp.output" &&
+ $perf_tool report -i "$trace_file.decomp" --stdio -F comm,dso,sym | head -n -3 > "$trace_file.decomp.output" &&
+ diff "$trace_file.comp.output" "$trace_file.decomp.output"
}
skip_if_no_z_record || exit 2
collect_z_record && check_compressed_stats && check_compressed_output
err=$?
-rm -f $trace_file*
+rm -f "$trace_file*"
exit $err
diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh
new file mode 100755
index 000000000000..31c593966e8c
--- /dev/null
+++ b/tools/perf/tests/shell/record_bpf_filter.sh
@@ -0,0 +1,134 @@
+#!/bin/sh
+# perf record sample filtering (by BPF) tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_bpf_filter_priv() {
+ echo "Checking BPF-filter privilege"
+
+ if [ "$(id -u)" != 0 ]
+ then
+ echo "bpf-filter test [Skipped permission]"
+ err=2
+ return
+ fi
+ if ! perf record -e task-clock --filter 'period > 1' \
+ -o /dev/null --quiet true 2>&1
+ then
+ echo "bpf-filter test [Skipped missing BPF support]"
+ err=2
+ return
+ fi
+}
+
+test_bpf_filter_basic() {
+ echo "Basic bpf-filter test"
+
+ if ! perf record -e task-clock -c 10000 --filter 'ip < 0xffffffff00000000' \
+ -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Basic bpf-filter test [Failed record]"
+ err=1
+ return
+ fi
+ if perf script -i "${perfdata}" -F ip | grep 'ffffffff[0-9a-f]*'
+ then
+ if uname -r | grep -q ^6.2
+ then
+ echo "Basic bpf-filter test [Skipped unsupported kernel]"
+ err=2
+ return
+ fi
+ echo "Basic bpf-filter test [Failed invalid output]"
+ err=1
+ return
+ fi
+ echo "Basic bpf-filter test [Success]"
+}
+
+test_bpf_filter_fail() {
+ echo "Failing bpf-filter test"
+
+ # 'cpu' requires PERF_SAMPLE_CPU flag
+ if ! perf record -e task-clock --filter 'cpu > 0' \
+ -o /dev/null true 2>&1 | grep PERF_SAMPLE_CPU
+ then
+ echo "Failing bpf-filter test [Failed forbidden CPU]"
+ err=1
+ return
+ fi
+
+ if ! perf record --sample-cpu -e task-clock --filter 'cpu > 0' \
+ -o /dev/null true 2>/dev/null
+ then
+ echo "Failing bpf-filter test [Failed should succeed]"
+ err=1
+ return
+ fi
+
+ echo "Failing bpf-filter test [Success]"
+}
+
+test_bpf_filter_group() {
+ echo "Group bpf-filter test"
+
+ if ! perf record -e task-clock --filter 'period > 1000 || ip > 0' \
+ -o /dev/null true 2>/dev/null
+ then
+ echo "Group bpf-filter test [Failed should succeed]"
+ err=1
+ return
+ fi
+
+ if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \
+ -o /dev/null true 2>&1 | grep PERF_SAMPLE_CPU
+ then
+ echo "Group bpf-filter test [Failed forbidden CPU]"
+ err=1
+ return
+ fi
+
+ if ! perf record -e task-clock --filter 'period > 0 || code_pgsz > 4096' \
+ -o /dev/null true 2>&1 | grep PERF_SAMPLE_CODE_PAGE_SIZE
+ then
+ echo "Group bpf-filter test [Failed forbidden CODE_PAGE_SIZE]"
+ err=1
+ return
+ fi
+
+ echo "Group bpf-filter test [Success]"
+}
+
+
+test_bpf_filter_priv
+
+if [ $err = 0 ]; then
+ test_bpf_filter_basic
+fi
+
+if [ $err = 0 ]; then
+ test_bpf_filter_fail
+fi
+
+if [ $err = 0 ]; then
+ test_bpf_filter_group
+fi
+
+cleanup
+exit $err
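For context, a minimal sketch of driving the new test through the perf shell-test runner (illustrative only; it needs root and a kernel with BPF sample-filter support, as the privilege check above verifies):

    # Run the whole shell test by its description, verbosely.
    sudo perf test -v 'perf record sample filtering'
    # Or exercise one of the filters it covers directly:
    sudo perf record -e task-clock -c 10000 \
            --filter 'ip < 0xffffffff00000000' -o /dev/null true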
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
index f062ae9a95e1..a0d14cd0aa79 100755
--- a/tools/perf/tests/shell/record_offcpu.sh
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -10,19 +10,19 @@ perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
cleanup() {
rm -f ${perfdata}
rm -f ${perfdata}.old
- trap - exit term int
+ trap - EXIT TERM INT
}
trap_cleanup() {
cleanup
exit 1
}
-trap trap_cleanup exit term int
+trap trap_cleanup EXIT TERM INT
test_offcpu_priv() {
echo "Checking off-cpu privilege"
- if [ `id -u` != 0 ]
+ if [ "$(id -u)" != 0 ]
then
echo "off-cpu test [Skipped permission]"
err=2
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index 34a0701fee05..d890eb26e914 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -6,7 +6,7 @@
set -e
-. $(dirname $0)/lib/stat_output.sh
+. "$(dirname $0)"/lib/stat_output.sh
csv_sep=@
diff --git a/tools/perf/tests/shell/stat+csv_summary.sh b/tools/perf/tests/shell/stat+csv_summary.sh
index 5571ff75eb42..8bae9c8a835e 100755
--- a/tools/perf/tests/shell/stat+csv_summary.sh
+++ b/tools/perf/tests/shell/stat+csv_summary.sh
@@ -10,7 +10,7 @@ set -e
#
perf stat -e cycles -x' ' -I1000 --interval-count 1 --summary 2>&1 | \
grep -e summary | \
-while read summary num event run pct
+while read summary _num _event _run _pct
do
if [ $summary != "summary" ]; then
exit 1
@@ -23,7 +23,7 @@ done
#
perf stat -e cycles -x' ' -I1000 --interval-count 1 --summary --no-csv-summary 2>&1 | \
grep -e summary | \
-while read num event run pct
+while read _num _event _run _pct
do
exit 1
done
diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh
index 0e9cba84e757..a1918a15e36a 100755
--- a/tools/perf/tests/shell/stat+shadow_stat.sh
+++ b/tools/perf/tests/shell/stat+shadow_stat.sh
@@ -14,7 +14,7 @@ test_global_aggr()
{
perf stat -a --no-big-num -e cycles,instructions sleep 1 2>&1 | \
grep -e cycles -e instructions | \
- while read num evt hash ipc rest
+ while read num evt _hash ipc rest
do
# skip not counted events
if [ "$num" = "<not" ]; then
@@ -45,7 +45,7 @@ test_no_aggr()
{
perf stat -a -A --no-big-num -e cycles,instructions sleep 1 2>&1 | \
grep ^CPU | \
- while read cpu num evt hash ipc rest
+ while read cpu num evt _hash ipc rest
do
# skip not counted events
if [ "$num" = "<not" ]; then
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index f972b31fa0c2..fb2b10547a11 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -6,7 +6,7 @@
set -e
-. $(dirname $0)/lib/stat_output.sh
+. "$(dirname $0)"/lib/stat_output.sh
stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
@@ -28,7 +28,6 @@ trap trap_cleanup EXIT TERM INT
function commachecker()
{
- local -i cnt=0
local prefix=1
case "$1"
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 13473aeba489..a87bb2814b4c 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -22,21 +22,21 @@ compare_number()
}
# skip if --bpf-counters is not supported
-if ! perf stat --bpf-counters true > /dev/null 2>&1; then
+if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
if [ "$1" = "-v" ]; then
echo "Skipping: --bpf-counters not supported"
- perf --no-pager stat --bpf-counters true || true
+ perf --no-pager stat -e cycles --bpf-counters true || true
fi
exit 2
fi
base_cycles=$(perf stat --no-big-num -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
-if [ "$base_cycles" == "<not" ]; then
+if [ "$base_cycles" = "<not" ]; then
echo "Skipping: cycles event not counted"
exit 2
fi
bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
-if [ "$bpf_cycles" == "<not" ]; then
+if [ "$bpf_cycles" = "<not" ]; then
echo "Failed: cycles not counted with --bpf-counters"
exit 1
fi
diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
index d724855d097c..e75d0780dc78 100755
--- a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
@@ -25,22 +25,22 @@ check_bpf_counter()
find_cgroups()
{
# try usual systemd slices first
- if [ -d /sys/fs/cgroup/system.slice -a -d /sys/fs/cgroup/user.slice ]; then
+ if [ -d /sys/fs/cgroup/system.slice ] && [ -d /sys/fs/cgroup/user.slice ]; then
test_cgroups="system.slice,user.slice"
return
fi
# try root and self cgroups
- local self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
- if [ -z ${self_cgrp} ]; then
+ find_cgroups_self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
+ if [ -z ${find_cgroups_self_cgrp} ]; then
# cgroup v2 doesn't specify perf_event
- self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
+ find_cgroups_self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
fi
- if [ -z ${self_cgrp} ]; then
+ if [ -z ${find_cgroups_self_cgrp} ]; then
test_cgroups="/"
else
- test_cgroups="/,${self_cgrp}"
+ test_cgroups="/,${find_cgroups_self_cgrp}"
fi
}
@@ -48,13 +48,11 @@ find_cgroups()
# Just check if it runs without failure and has non-zero results.
check_system_wide_counted()
{
- local output
-
- output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1 2>&1)
- if echo ${output} | grep -q -F "<not "; then
+ check_system_wide_counted_output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1 2>&1)
+ if echo ${check_system_wide_counted_output} | grep -q -F "<not "; then
echo "Some system-wide events are not counted"
if [ "${verbose}" = "1" ]; then
- echo ${output}
+ echo ${check_system_wide_counted_output}
fi
exit 1
fi
@@ -62,13 +60,11 @@ check_system_wide_counted()
check_cpu_list_counted()
{
- local output
-
- output=$(perf stat -C 1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1 2>&1)
- if echo ${output} | grep -q -F "<not "; then
+ check_cpu_list_counted_output=$(perf stat -C 0,1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1 2>&1)
+ if echo ${check_cpu_list_counted_output} | grep -q -F "<not "; then
echo "Some CPU events are not counted"
if [ "${verbose}" = "1" ]; then
- echo ${output}
+ echo ${check_cpu_list_counted_output}
fi
exit 1
fi
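
The 'local' keyword removed above is not defined by POSIX sh, which is why shellcheck flags it; the replacement convention is to prefix each variable with its function name so it cannot clash with the caller's variables. A minimal sketch of that convention, with made-up names:

    #!/bin/sh
    count_lines() {
        # was: local out=...; prefix with the function name instead
        count_lines_out=$(wc -l < "$1")
        echo "$1: $count_lines_out lines"
    }
    count_lines "$0"
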
diff --git a/tools/perf/tests/shell/test_arm_spe_fork.sh b/tools/perf/tests/shell/test_arm_spe_fork.sh
index fad361675a1d..1a7e6a82d0e3 100755
--- a/tools/perf/tests/shell/test_arm_spe_fork.sh
+++ b/tools/perf/tests/shell/test_arm_spe_fork.sh
@@ -22,7 +22,7 @@ cleanup_files()
rm -f ${PERF_DATA}
}
-trap cleanup_files exit term int
+trap cleanup_files EXIT TERM INT
echo "Recording workload..."
perf record -o ${PERF_DATA} -e arm_spe/period=65536/ -vvv -- $TEST_PROGRAM > ${PERF_RECORD_LOG} 2>&1 &
diff --git a/tools/perf/tests/shell/test_perf_data_converter_json.sh b/tools/perf/tests/shell/test_perf_data_converter_json.sh
index 72ac6c83231c..6ded58f98f55 100755
--- a/tools/perf/tests/shell/test_perf_data_converter_json.sh
+++ b/tools/perf/tests/shell/test_perf_data_converter_json.sh
@@ -39,7 +39,7 @@ test_json_converter_command()
echo "Testing Perf Data Convertion Command to JSON"
perf record -o "$perfdata" -F 99 -g -- perf test -w noploop > /dev/null 2>&1
perf data convert --to-json "$result" --force -i "$perfdata" >/dev/null 2>&1
- if [ $(cat "${result}" | wc -l) -gt "0" ] ; then
+ if [ "$(cat ${result} | wc -l)" -gt "0" ] ; then
echo "Perf Data Converter Command to JSON [SUCCESS]"
else
echo "Perf Data Converter Command to JSON [FAILED]"
diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh
index 0095abbe20ca..92d15154ba79 100755
--- a/tools/perf/tests/shell/test_task_analyzer.sh
+++ b/tools/perf/tests/shell/test_task_analyzer.sh
@@ -52,7 +52,7 @@ find_str_or_fail() {
# check if perf is compiled with libtraceevent support
skip_no_probe_record_support() {
- perf record -e "sched:sched_switch" -a -- sleep 1 2>&1 | grep "libtraceevent is necessary for tracepoint support" && return 2
+ perf version --build-options | grep -q " OFF .* HAVE_LIBTRACEEVENT" && return 2
return 0
}
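
Probing for libtraceevent support now reads the build options report instead of provoking a warning from a real perf record run. The same check works standalone, using the grep pattern above that matches the OFF state of the HAVE_LIBTRACEEVENT feature line:

    #!/bin/sh
    if perf version --build-options | grep -q " OFF .* HAVE_LIBTRACEEVENT"; then
        echo "perf was built without libtraceevent, skipping"
        exit 2
    fi
    echo "libtraceevent support is available"
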
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 0a4bac3dd77e..4014487cf4d9 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -10,17 +10,17 @@
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
-. $(dirname $0)/lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_probe || exit 2
skip_if_no_perf_trace || exit 2
-. $(dirname $0)/lib/probe_vfs_getname.sh
+. "$(dirname $0)"/lib/probe_vfs_getname.sh
trace_open_vfs_getname() {
- evts=$(echo $(perf list syscalls:sys_enter_open* 2>/dev/null | grep -E 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
+ evts="$(echo "$(perf list syscalls:sys_enter_open* 2>/dev/null | grep -E 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/')" | sed ':a;N;s:\n:,:g')"
perf trace -e $evts touch $file 2>&1 | \
- grep -E " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
+ grep -E " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +\"?${file}\"?, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
}
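
The rewritten evts assignment no longer relies on unquoted word splitting to flatten the perf list output; the sed label loop joins the newline-separated event names with commas instead. The join can be tried in isolation on the same kind of two-line input:

    #!/bin/sh
    # Join the open/openat pair with a comma, as the ':a;N;s:\n:,:g' loop above does.
    printf 'openat\nopen\n' | sed ':a;N;s:\n:,:g'
    # prints: openat,open
    # Where available, 'paste -sd, -' is an equivalent, shorter spelling.
    printf 'openat\nopen\n' | paste -sd, -
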
diff --git a/tools/perf/tests/stat.c b/tools/perf/tests/stat.c
index 500974040fe3..706780fb5695 100644
--- a/tools/perf/tests/stat.c
+++ b/tools/perf/tests/stat.c
@@ -27,7 +27,7 @@ static int process_stat_config_event(struct perf_tool *tool __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_record_stat_config *config = &event->stat_config;
- struct perf_stat_config stat_config;
+ struct perf_stat_config stat_config = {};
#define HAS(term, val) \
has_term(config, PERF_STAT_CONFIG_TERM__##term, val)
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index f424c0b7f43f..f33cfc3c19a4 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -113,7 +113,6 @@ DECLARE_SUITE(fdarray__filter);
DECLARE_SUITE(fdarray__add);
DECLARE_SUITE(kmod_path__parse);
DECLARE_SUITE(thread_map);
-DECLARE_SUITE(llvm);
DECLARE_SUITE(bpf);
DECLARE_SUITE(session_topology);
DECLARE_SUITE(thread_map_synthesize);
@@ -129,7 +128,6 @@ DECLARE_SUITE(sdt_event);
DECLARE_SUITE(is_printable_array);
DECLARE_SUITE(bitmap_print);
DECLARE_SUITE(perf_hooks);
-DECLARE_SUITE(clang);
DECLARE_SUITE(unit_number__scnprint);
DECLARE_SUITE(mem2node);
DECLARE_SUITE(maps__merge_in);
diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
index 37c53bac5f56..cc09dcaa891e 100755
--- a/tools/perf/trace/beauty/arch_errno_names.sh
+++ b/tools/perf/trace/beauty/arch_errno_names.sh
@@ -17,8 +17,7 @@ arch_string()
asm_errno_file()
{
- local arch="$1"
- local header
+ arch="$1"
header="$toolsdir/arch/$arch/include/uapi/asm/errno.h"
if test -r "$header"; then
@@ -30,8 +29,7 @@ asm_errno_file()
create_errno_lookup_func()
{
- local arch=$(arch_string "$1")
- local nr name
+ arch=$(arch_string "$1")
printf "static const char *errno_to_name__%s(int err)\n{\n\tswitch (err) {\n" $arch
@@ -44,8 +42,8 @@ create_errno_lookup_func()
process_arch()
{
- local arch="$1"
- local asm_errno=$(asm_errno_file "$arch")
+ arch="$1"
+ asm_errno=$(asm_errno_file "$arch")
$gcc $CFLAGS $include_path -E -dM -x c $asm_errno \
|grep -hE '^#define[[:blank:]]+(E[^[:blank:]]+)[[:blank:]]+([[:digit:]]+).*' \
@@ -56,9 +54,8 @@ process_arch()
create_arch_errno_table_func()
{
- local archlist="$1"
- local default="$2"
- local arch
+ archlist="$1"
+ default="$2"
printf 'const char *arch_syscalls__strerrno(const char *arch, int err)\n'
printf '{\n'
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index 3d12bf0f6d07..788e8f6bd90e 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -67,15 +67,14 @@ extern struct strarray strarray__socket_level;
/**
* augmented_arg: extra payload for syscall pointer arguments
- * If perf_sample->raw_size is more than what a syscall sys_enter_FOO puts,
- * then its the arguments contents, so that we can show more than just a
+ * If perf_sample->raw_size is more than what a syscall sys_enter_FOO puts, then
+ * it's the argument's contents, so that we can show more than just a
* pointer. This will be done initially with eBPF, the start of that is at the
- * tools/perf/examples/bpf/augmented_syscalls.c example for the openat, but
- * will eventually be done automagically caching the running kernel tracefs
- * events data into an eBPF C script, that then gets compiled and its .o file
- * cached for subsequent use. For char pointers like the ones for 'open' like
- * syscalls its easy, for the rest we should use DWARF or better, BTF, much
- * more compact.
+ * tools/perf/util/bpf_skel/augmented_syscalls.bpf.c that will eventually be
+ * done automagically caching the running kernel tracefs events data into an
+ * eBPF C script, that then gets compiled and its .o file cached for subsequent
+ * use. For char pointers like the ones for 'open' like syscalls it's easy, for
+ * the rest we should use DWARF or better, BTF, much more compact.
*
* @size: 8 if all we need is an integer, otherwise all of the augmented arg.
* @int_arg: will be used for integer like pointer contents, like 'accept's 'upeer_addrlen'
diff --git a/tools/perf/trace/beauty/mmap_flags.sh b/tools/perf/trace/beauty/mmap_flags.sh
index 3022597c8c17..6ecdb3c5a99e 100755
--- a/tools/perf/trace/beauty/mmap_flags.sh
+++ b/tools/perf/trace/beauty/mmap_flags.sh
@@ -19,6 +19,7 @@ arch_mman=${arch_header_dir}/mman.h
printf "static const char *mmap_flags[] = {\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MAP_([[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*'
+test -f ${arch_mman} && \
grep -E -q $regex ${arch_mman} && \
(grep -E $regex ${arch_mman} | \
sed -r "s/$regex/\2 \1 \1 \1 \2/g" | \
@@ -28,12 +29,14 @@ grep -E -q $regex ${linux_mman} && \
grep -E -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
sed -r "s/$regex/\2 \1 \1 \1 \2/g" | \
xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n#ifndef MAP_%s\n#define MAP_%s %s\n#endif\n")
-([ ! -f ${arch_mman} ] || grep -E -q '#[[:space:]]*include[[:space:]]+.*uapi/asm-generic/mman.*' ${arch_mman}) &&
+( ! test -f ${arch_mman} || \
+grep -E -q '#[[:space:]]*include[[:space:]]+.*uapi/asm-generic/mman.*' ${arch_mman}) &&
(grep -E $regex ${header_dir}/mman-common.h | \
grep -E -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
sed -r "s/$regex/\2 \1 \1 \1 \2/g" | \
xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n#ifndef MAP_%s\n#define MAP_%s %s\n#endif\n")
-([ ! -f ${arch_mman} ] || grep -E -q '#[[:space:]]*include[[:space:]]+.*uapi/asm-generic/mman.h>.*' ${arch_mman}) &&
+( ! test -f ${arch_mman} || \
+grep -E -q '#[[:space:]]*include[[:space:]]+.*uapi/asm-generic/mman.h>.*' ${arch_mman}) &&
(grep -E $regex ${header_dir}/mman.h | \
sed -r "s/$regex/\2 \1 \1 \1 \2/g" | \
xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n#ifndef MAP_%s\n#define MAP_%s %s\n#endif\n")
diff --git a/tools/perf/trace/beauty/mmap_prot.sh b/tools/perf/trace/beauty/mmap_prot.sh
index 49e8c865214b..4436fcd6e861 100755
--- a/tools/perf/trace/beauty/mmap_prot.sh
+++ b/tools/perf/trace/beauty/mmap_prot.sh
@@ -17,12 +17,13 @@ prefix="PROT"
printf "static const char *mmap_prot[] = {\n"
regex=`printf '^[[:space:]]*#[[:space:]]*define[[:space:]]+%s_([[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*' ${prefix}`
-([ ! -f ${arch_mman} ] || grep -E -q '#[[:space:]]*include[[:space:]]+.*uapi/asm-generic/mman.*' ${arch_mman}) &&
+( ! test -f ${arch_mman} \
+|| grep -E -q '#[[:space:]]*include[[:space:]]+.*uapi/asm-generic/mman.*' ${arch_mman}) &&
(grep -E $regex ${common_mman} | \
grep -E -vw PROT_NONE | \
sed -r "s/$regex/\2 \1 \1 \1 \2/g" | \
xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n#ifndef ${prefix}_%s\n#define ${prefix}_%s %s\n#endif\n")
-[ -f ${arch_mman} ] && grep -E -q $regex ${arch_mman} &&
+test -f ${arch_mman} && grep -E -q $regex ${arch_mman} &&
(grep -E $regex ${arch_mman} | \
grep -E -vw PROT_NONE | \
sed -r "s/$regex/\2 \1 \1 \1 \2/g" | \
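
Both generator scripts now gate every grep of the per-architecture mman.h behind a file-existence test, so architectures that lack the header fall through to the asm-generic definitions instead of erroring out. The guard in isolation, with a placeholder path:

    #!/bin/sh
    arch_header=/no/such/arch/uapi/asm/mman.h    # placeholder path
    # Only grep the arch header when it actually exists ...
    test -f "$arch_header" && grep -E 'MAP_' "$arch_header"
    # ... and take the generic definitions when it is missing or merely includes them.
    ( ! test -f "$arch_header" || grep -q 'asm-generic/mman' "$arch_header" ) &&
        echo "fall back to asm-generic mman.h"
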
diff --git a/tools/perf/trace/beauty/x86_arch_prctl.sh b/tools/perf/trace/beauty/x86_arch_prctl.sh
index fd5c740512c5..b1596df251f0 100755
--- a/tools/perf/trace/beauty/x86_arch_prctl.sh
+++ b/tools/perf/trace/beauty/x86_arch_prctl.sh
@@ -7,9 +7,9 @@
prctl_arch_header=${x86_header_dir}/prctl.h
print_range () {
- local idx=$1
- local prefix=$2
- local first_entry=$3
+ idx=$1
+ prefix=$2
+ first_entry=$3
printf "#define x86_arch_prctl_codes_%d_offset %s\n" $idx $first_entry
printf "static const char *x86_arch_prctl_codes_%d[] = {\n" $idx
diff --git a/tools/perf/ui/Build b/tools/perf/ui/Build
index 3aff83c3275f..6b6d7143a37b 100644
--- a/tools/perf/ui/Build
+++ b/tools/perf/ui/Build
@@ -10,5 +10,3 @@ CFLAGS_setup.o += -DLIBDIR="BUILD_STR($(LIBDIR))"
perf-$(CONFIG_SLANG) += browser.o
perf-$(CONFIG_SLANG) += browsers/
perf-$(CONFIG_SLANG) += tui/
-
-CFLAGS_browser.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 78fb01d6ad63..603d11283cbd 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -57,12 +57,12 @@ void ui_browser__gotorc(struct ui_browser *browser, int y, int x)
void ui_browser__write_nstring(struct ui_browser *browser __maybe_unused, const char *msg,
unsigned int width)
{
- slsmg_write_nstring(msg, width);
+ SLsmg_write_nstring(msg, width);
}
void ui_browser__vprintf(struct ui_browser *browser __maybe_unused, const char *fmt, va_list args)
{
- slsmg_vprintf(fmt, args);
+ SLsmg_vprintf(fmt, args);
}
void ui_browser__printf(struct ui_browser *browser __maybe_unused, const char *fmt, ...)
@@ -808,6 +808,6 @@ void ui_browser__init(void)
while (ui_browser__colorsets[i].name) {
struct ui_browser_colorset *c = &ui_browser__colorsets[i++];
- sltt_set_color(c->colorset, c->name, c->fg, c->bg);
+ SLtt_set_color(c->colorset, c->name, c->fg, c->bg);
}
}
diff --git a/tools/perf/ui/browsers/Build b/tools/perf/ui/browsers/Build
index fdf86f7981ca..7a1d5ddaf688 100644
--- a/tools/perf/ui/browsers/Build
+++ b/tools/perf/ui/browsers/Build
@@ -4,8 +4,3 @@ perf-y += map.o
perf-y += scripts.o
perf-y += header.o
perf-y += res_sample.o
-
-CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST
-CFLAGS_hists.o += -DENABLE_SLFUTURE_CONST
-CFLAGS_map.o += -DENABLE_SLFUTURE_CONST
-CFLAGS_scripts.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index c7ad9e003080..70db5a717905 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -407,11 +407,6 @@ static bool hist_browser__selection_has_children(struct hist_browser *browser)
return container_of(ms, struct callchain_list, ms)->has_children;
}
-static bool hist_browser__he_selection_unfolded(struct hist_browser *browser)
-{
- return browser->he_selection ? browser->he_selection->unfolded : false;
-}
-
static bool hist_browser__selection_unfolded(struct hist_browser *browser)
{
struct hist_entry *he = browser->he_selection;
@@ -584,8 +579,8 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
return n;
}
-static void __hist_entry__set_folding(struct hist_entry *he,
- struct hist_browser *hb, bool unfold)
+static void hist_entry__set_folding(struct hist_entry *he,
+ struct hist_browser *hb, bool unfold)
{
hist_entry__init_have_children(he);
he->unfolded = unfold ? he->has_children : false;
@@ -603,34 +598,12 @@ static void __hist_entry__set_folding(struct hist_entry *he,
he->nr_rows = 0;
}
-static void hist_entry__set_folding(struct hist_entry *he,
- struct hist_browser *browser, bool unfold)
-{
- double percent;
-
- percent = hist_entry__get_percent_limit(he);
- if (he->filtered || percent < browser->min_pcnt)
- return;
-
- __hist_entry__set_folding(he, browser, unfold);
-
- if (!he->depth || unfold)
- browser->nr_hierarchy_entries++;
- if (he->leaf)
- browser->nr_callchain_rows += he->nr_rows;
- else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
- browser->nr_hierarchy_entries++;
- he->has_no_entry = true;
- he->nr_rows = 1;
- } else
- he->has_no_entry = false;
-}
-
static void
__hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
struct rb_node *nd;
struct hist_entry *he;
+ double percent;
nd = rb_first_cached(&browser->hists->entries);
while (nd) {
@@ -640,6 +613,21 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
hist_entry__set_folding(he, browser, unfold);
+
+ percent = hist_entry__get_percent_limit(he);
+ if (he->filtered || percent < browser->min_pcnt)
+ continue;
+
+ if (!he->depth || unfold)
+ browser->nr_hierarchy_entries++;
+ if (he->leaf)
+ browser->nr_callchain_rows += he->nr_rows;
+ else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
+ browser->nr_hierarchy_entries++;
+ he->has_no_entry = true;
+ he->nr_rows = 1;
+ } else
+ he->has_no_entry = false;
}
}
@@ -659,8 +647,10 @@ static void hist_browser__set_folding_selected(struct hist_browser *browser, boo
if (!browser->he_selection)
return;
- hist_entry__set_folding(browser->he_selection, browser, unfold);
- browser->b.nr_entries = hist_browser__nr_entries(browser);
+ if (unfold == browser->he_selection->unfolded)
+ return;
+
+ hist_browser__toggle_fold(browser);
}
static void ui_browser__warn_lost_events(struct ui_browser *browser)
@@ -732,8 +722,8 @@ static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_l
hist_browser__set_folding(browser, true);
break;
case 'e':
- /* Expand the selected entry. */
- hist_browser__set_folding_selected(browser, !hist_browser__he_selection_unfolded(browser));
+ /* Toggle expand/collapse the selected entry. */
+ hist_browser__toggle_fold(browser);
break;
case 'H':
browser->show_headers = !browser->show_headers;
@@ -1779,7 +1769,7 @@ static void hists_browser__hierarchy_headers(struct hist_browser *browser)
hists_browser__scnprintf_hierarchy_headers(browser, headers,
sizeof(headers));
- ui_browser__gotorc(&browser->b, 0, 0);
+ ui_browser__gotorc_title(&browser->b, 0, 0);
ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
}
diff --git a/tools/perf/ui/libslang.h b/tools/perf/ui/libslang.h
index 991e692b9b46..1dff3020e9d5 100644
--- a/tools/perf/ui/libslang.h
+++ b/tools/perf/ui/libslang.h
@@ -11,28 +11,16 @@
#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
#endif
+/* Enable future slang's corrected function prototypes. */
+#define ENABLE_SLFUTURE_CONST 1
+#define ENABLE_SLFUTURE_VOID 1
+
#ifdef HAVE_SLANG_INCLUDE_SUBDIR
#include <slang/slang.h>
#else
#include <slang.h>
#endif
-#if SLANG_VERSION < 20104
-#define slsmg_printf(msg, args...) \
- SLsmg_printf((char *)(msg), ##args)
-#define slsmg_vprintf(msg, vargs) \
- SLsmg_vprintf((char *)(msg), vargs)
-#define slsmg_write_nstring(msg, len) \
- SLsmg_write_nstring((char *)(msg), len)
-#define sltt_set_color(obj, name, fg, bg) \
- SLtt_set_color(obj,(char *)(name), (char *)(fg), (char *)(bg))
-#else
-#define slsmg_printf SLsmg_printf
-#define slsmg_vprintf SLsmg_vprintf
-#define slsmg_write_nstring SLsmg_write_nstring
-#define sltt_set_color SLtt_set_color
-#endif
-
#define SL_KEY_UNTAB 0x1000
#endif /* _PERF_UI_SLANG_H_ */
diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
index db4952f5990b..b39451314f43 100644
--- a/tools/perf/ui/tui/helpline.c
+++ b/tools/perf/ui/tui/helpline.c
@@ -22,7 +22,7 @@ static void tui_helpline__push(const char *msg)
SLsmg_gotorc(SLtt_Screen_Rows - 1, 0);
SLsmg_set_color(0);
- SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
+ SLsmg_write_nstring(msg, SLtt_Screen_Cols);
SLsmg_refresh();
strlcpy(ui_helpline__current, msg, sz);
}
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index c1886aa184b3..605d9e175ea7 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -142,7 +142,7 @@ int ui__init(void)
goto out;
}
- SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB);
+ SLkp_define_keysym("^(kB)", SL_KEY_UNTAB);
signal(SIGSEGV, ui__signal_backtrace);
signal(SIGFPE, ui__signal_backtrace);
diff --git a/tools/perf/ui/tui/util.c b/tools/perf/ui/tui/util.c
index 3c5174854ac8..e4d322ce0b54 100644
--- a/tools/perf/ui/tui/util.c
+++ b/tools/perf/ui/tui/util.c
@@ -106,7 +106,7 @@ int ui_browser__input_window(const char *title, const char *text, char *input,
SLsmg_draw_box(y, x++, nr_lines, max_len);
if (title) {
SLsmg_gotorc(y, x + 1);
- SLsmg_write_string((char *)title);
+ SLsmg_write_string(title);
}
SLsmg_gotorc(++y, x);
nr_lines -= 7;
@@ -117,12 +117,12 @@ int ui_browser__input_window(const char *title, const char *text, char *input,
len = 5;
while (len--) {
SLsmg_gotorc(y + len - 1, x);
- SLsmg_write_nstring((char *)" ", max_len);
+ SLsmg_write_nstring(" ", max_len);
}
SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
SLsmg_gotorc(y + 3, x);
- SLsmg_write_nstring((char *)exit_msg, max_len);
+ SLsmg_write_nstring(exit_msg, max_len);
SLsmg_refresh();
mutex_unlock(&ui__lock);
@@ -197,7 +197,7 @@ void __ui__info_window(const char *title, const char *text, const char *exit_msg
SLsmg_draw_box(y, x++, nr_lines, max_len);
if (title) {
SLsmg_gotorc(y, x + 1);
- SLsmg_write_string((char *)title);
+ SLsmg_write_string(title);
}
SLsmg_gotorc(++y, x);
if (exit_msg)
@@ -207,9 +207,9 @@ void __ui__info_window(const char *title, const char *text, const char *exit_msg
nr_lines, max_len, 1);
if (exit_msg) {
SLsmg_gotorc(y + nr_lines - 2, x);
- SLsmg_write_nstring((char *)" ", max_len);
+ SLsmg_write_nstring(" ", max_len);
SLsmg_gotorc(y + nr_lines - 1, x);
- SLsmg_write_nstring((char *)exit_msg, max_len);
+ SLsmg_write_nstring(exit_msg, max_len);
}
}
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 96f4ea1d45c5..6d657c9927f7 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -1,3 +1,6 @@
+include $(srctree)/tools/scripts/Makefile.include
+include $(srctree)/tools/scripts/utilities.mak
+
perf-y += arm64-frame-pointer-unwind-support.o
perf-y += addr_location.o
perf-y += annotate.o
@@ -20,13 +23,13 @@ perf-y += evswitch.o
perf-y += find_bit.o
perf-y += get_current_dir_name.o
perf-y += levenshtein.o
-perf-y += llvm-utils.o
perf-y += mmap.o
perf-y += memswap.o
perf-y += parse-events.o
perf-y += print-events.o
perf-y += tracepoint.o
perf-y += perf_regs.o
+perf-y += perf-regs-arch/
perf-y += path.o
perf-y += print_binary.o
perf-y += rlimit.o
@@ -147,7 +150,6 @@ perf-y += list_sort.o
perf-y += mutex.o
perf-y += sharded_mutex.o
-perf-$(CONFIG_LIBBPF) += bpf-loader.o
perf-$(CONFIG_LIBBPF) += bpf_map.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o
@@ -165,7 +167,6 @@ ifeq ($(CONFIG_LIBTRACEEVENT),y)
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork.o
endif
-perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
perf-$(CONFIG_LIBELF) += symbol-elf.o
perf-$(CONFIG_LIBELF) += probe-file.o
perf-$(CONFIG_LIBELF) += probe-event.o
@@ -229,12 +230,9 @@ perf-y += perf-hooks.o
perf-$(CONFIG_LIBBPF) += bpf-event.o
perf-$(CONFIG_LIBBPF) += bpf-utils.o
-perf-$(CONFIG_CXX) += c++/
-
perf-$(CONFIG_LIBPFM4) += pfm.o
CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
-CFLAGS_llvm-utils.o += -DLIBBPF_INCLUDE_DIR="BUILD_STR($(libbpf_include_dir_SQ))"
# avoid compiler warnings in 32-bit mode
CFLAGS_genelf_debug.o += -Wno-packed
@@ -246,7 +244,7 @@ $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-flex.h: util/parse-
$(OUTPUT)util/parse-events-bison.c $(OUTPUT)util/parse-events-bison.h: util/parse-events.y
$(call rule_mkdir)
- $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) \
+ $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) $(BISON_FALLBACK_FLAGS) \
-o $(OUTPUT)util/parse-events-bison.c -p parse_events_
$(OUTPUT)util/expr-flex.c $(OUTPUT)util/expr-flex.h: util/expr.l $(OUTPUT)util/expr-bison.c
@@ -279,28 +277,58 @@ $(OUTPUT)util/bpf-filter-bison.c $(OUTPUT)util/bpf-filter-bison.h: util/bpf-filt
$(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) \
-o $(OUTPUT)util/bpf-filter-bison.c -p perf_bpf_filter_
-FLEX_GE_26 := $(shell expr $(shell $(FLEX) --version | sed -e 's/flex \([0-9]\+\).\([0-9]\+\)/\1\2/g') \>\= 26)
-ifeq ($(FLEX_GE_26),1)
- flex_flags := -Wno-switch-enum -Wno-switch-default -Wno-unused-function -Wno-redundant-decls -Wno-sign-compare -Wno-unused-parameter -Wno-missing-prototypes -Wno-missing-declarations
- CC_HASNT_MISLEADING_INDENTATION := $(shell echo "int main(void) { return 0 }" | $(CC) -Werror -Wno-misleading-indentation -o /dev/null -xc - 2>&1 | grep -q -- -Wno-misleading-indentation ; echo $$?)
- ifeq ($(CC_HASNT_MISLEADING_INDENTATION), 1)
- flex_flags += -Wno-misleading-indentation
+FLEX_VERSION := $(shell $(FLEX) --version | cut -d' ' -f2)
+
+FLEX_GE_260 := $(call version-ge3,$(FLEX_VERSION),2.6.0)
+ifeq ($(FLEX_GE_260),1)
+ flex_flags := -Wno-redundant-decls -Wno-switch-default -Wno-unused-function -Wno-misleading-indentation
+
+ # Some newer clang and gcc versions complain about this
+ # util/parse-events-bison.c:1317:9: error: variable 'parse_events_nerrs' set but not used [-Werror,-Wunused-but-set-variable]
+ # int yynerrs = 0;
+
+ flex_flags += -Wno-unused-but-set-variable
+
+ FLEX_LT_262 := $(call version-lt3,$(FLEX_VERSION),2.6.2)
+ ifeq ($(FLEX_LT_262),1)
+ flex_flags += -Wno-sign-compare
endif
else
flex_flags := -w
endif
-CFLAGS_parse-events-flex.o += $(flex_flags)
-CFLAGS_pmu-flex.o += $(flex_flags)
-CFLAGS_expr-flex.o += $(flex_flags)
-CFLAGS_bpf-filter-flex.o += $(flex_flags)
-bison_flags := -DYYENABLE_NLS=0
-BISON_GE_35 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\)/\1\2/g') \>\= 35)
-ifeq ($(BISON_GE_35),1)
- bison_flags += -Wno-unused-parameter -Wno-nested-externs -Wno-implicit-function-declaration -Wno-switch-enum -Wno-unused-but-set-variable -Wno-unknown-warning-option
+# Some newer clang and gcc versions complain about this
+# util/parse-events-bison.c:1317:9: error: variable 'parse_events_nerrs' set but not used [-Werror,-Wunused-but-set-variable]
+# int yynerrs = 0;
+
+bison_flags := -DYYENABLE_NLS=0 -Wno-unused-but-set-variable
+
+# Old clangs don't grok -Wno-unused-but-set-variable, remove it
+ifeq ($(CC_NO_CLANG), 0)
+ CLANG_VERSION := $(shell $(CLANG) --version | head -1 | sed 's/.*clang version \([[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+\).*/\1/g')
+ ifeq ($(call version-lt3,$(CLANG_VERSION),13.0.0),1)
+ bison_flags := $(subst -Wno-unused-but-set-variable,,$(bison_flags))
+ flex_flags := $(subst -Wno-unused-but-set-variable,,$(flex_flags))
+ endif
+endif
+
+BISON_GE_382 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \>\= 382)
+ifeq ($(BISON_GE_382),1)
+ bison_flags += -Wno-switch-enum
else
bison_flags += -w
endif
+
+BISON_LT_381 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \< 381)
+ifeq ($(BISON_LT_381),1)
+ bison_flags += -DYYNOMEM=YYABORT
+endif
+
+CFLAGS_parse-events-flex.o += $(flex_flags) -Wno-unused-label
+CFLAGS_pmu-flex.o += $(flex_flags)
+CFLAGS_expr-flex.o += $(flex_flags)
+CFLAGS_bpf-filter-flex.o += $(flex_flags)
+
CFLAGS_parse-events-bison.o += $(bison_flags)
CFLAGS_pmu-bison.o += -DYYLTYPE_IS_TRIVIAL=0 $(bison_flags)
CFLAGS_expr-bison.o += -DYYLTYPE_IS_TRIVIAL=0 $(bison_flags)
@@ -316,8 +344,6 @@ CFLAGS_find_bit.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ET
CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
-CFLAGS_parse-events.o += -Wno-redundant-decls
-CFLAGS_expr.o += -Wno-redundant-decls
CFLAGS_header.o += -include $(OUTPUT)PERF-VERSION-FILE
CFLAGS_arm-spe.o += -I$(srctree)/tools/arch/arm64/include/
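
The flex and bison flag selection now compares full three-component versions through the version-ge3/version-lt3 helpers pulled in from tools/scripts/utilities.mak, rather than concatenating major and minor digits for an expr comparison. What such a "greater than or equal" check amounts to can be sketched in plain sh; this is only an illustration, not the helpers' actual implementation:

    #!/bin/sh
    # version_ge3 A B: succeed when dotted version A >= B, comparing three components.
    version_ge3() {
        a1=$(echo "$1" | cut -d. -f1); a2=$(echo "$1" | cut -d. -f2); a3=$(echo "$1" | cut -d. -f3)
        b1=$(echo "$2" | cut -d. -f1); b2=$(echo "$2" | cut -d. -f2); b3=$(echo "$2" | cut -d. -f3)
        [ "${a1:-0}" -gt "${b1:-0}" ] && return 0
        [ "${a1:-0}" -lt "${b1:-0}" ] && return 1
        [ "${a2:-0}" -gt "${b2:-0}" ] && return 0
        [ "${a2:-0}" -lt "${b2:-0}" ] && return 1
        [ "${a3:-0}" -ge "${b3:-0}" ]
    }
    version_ge3 "$(flex --version | cut -d' ' -f2)" 2.6.0 && echo "flex is 2.6.0 or newer"
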
diff --git a/tools/perf/util/amd-sample-raw.c b/tools/perf/util/amd-sample-raw.c
index 6a6ddba76c75..9d0ce88e90e4 100644
--- a/tools/perf/util/amd-sample-raw.c
+++ b/tools/perf/util/amd-sample-raw.c
@@ -15,7 +15,6 @@
#include "session.h"
#include "evlist.h"
#include "sample-raw.h"
-#include "pmu-events/pmu-events.h"
#include "util/sample.h"
static u32 cpu_family, cpu_model, ibs_fetch_type, ibs_op_type;
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index ba988a13dacb..82956adf9963 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1846,8 +1846,11 @@ static int symbol__disassemble_bpf(struct symbol *sym,
perf_exe(tpath, sizeof(tpath));
bfdf = bfd_openr(tpath, NULL);
- assert(bfdf);
- assert(bfd_check_format(bfdf, bfd_object));
+ if (bfdf == NULL)
+ abort();
+
+ if (!bfd_check_format(bfdf, bfd_object))
+ abort();
s = open_memstream(&buf, &buf_size);
if (!s) {
@@ -1895,7 +1898,8 @@ static int symbol__disassemble_bpf(struct symbol *sym,
#else
disassemble = disassembler(bfdf);
#endif
- assert(disassemble);
+ if (disassemble == NULL)
+ abort();
fflush(s);
do {
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index 0b30688d78a7..b51544996046 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -9,8 +9,8 @@
#include "util/evsel.h"
#include "util/bpf-filter.h"
-#include "util/bpf-filter-flex.h"
-#include "util/bpf-filter-bison.h"
+#include <util/bpf-filter-flex.h>
+#include <util/bpf-filter-bison.h>
#include "bpf_skel/sample-filter.h"
#include "bpf_skel/sample_filter.skel.h"
@@ -62,6 +62,16 @@ static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *
if (evsel->core.attr.sample_type & expr->sample_flags)
return 0;
+ if (expr->op == PBF_OP_GROUP_BEGIN) {
+ struct perf_bpf_filter_expr *group;
+
+ list_for_each_entry(group, &expr->groups, list) {
+ if (check_sample_flags(evsel, group) < 0)
+ return -1;
+ }
+ return 0;
+ }
+
info = get_sample_info(expr->sample_flags);
if (info == NULL) {
pr_err("Error: %s event does not have sample flags %lx\n",
diff --git a/tools/perf/util/bpf-filter.y b/tools/perf/util/bpf-filter.y
index 07d6c7926c13..5dfa948fc986 100644
--- a/tools/perf/util/bpf-filter.y
+++ b/tools/perf/util/bpf-filter.y
@@ -9,6 +9,8 @@
#include <linux/list.h>
#include "bpf-filter.h"
+int perf_bpf_filter_lex(void);
+
static void perf_bpf_filter_error(struct list_head *expr __maybe_unused,
char const *msg)
{
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
deleted file mode 100644
index 44cde27d6389..000000000000
--- a/tools/perf/util/bpf-loader.c
+++ /dev/null
@@ -1,2110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bpf-loader.c
- *
- * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
- * Copyright (C) 2015 Huawei Inc.
- */
-
-#include <linux/bpf.h>
-#include <bpf/libbpf.h>
-#include <bpf/bpf.h>
-#include <linux/filter.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/zalloc.h>
-#include <errno.h>
-#include <stdlib.h>
-#include "debug.h"
-#include "evlist.h"
-#include "bpf-loader.h"
-#include "bpf-prologue.h"
-#include "probe-event.h"
-#include "probe-finder.h" // for MAX_PROBES
-#include "parse-events.h"
-#include "strfilter.h"
-#include "util.h"
-#include "llvm-utils.h"
-#include "c++/clang-c.h"
-#include "util/hashmap.h"
-#include "asm/bug.h"
-
-#include <internal/xyarray.h>
-
-/* temporarily disable libbpf deprecation warnings */
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-
-static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
- const char *fmt, va_list args)
-{
- return veprintf(1, verbose, pr_fmt(fmt), args);
-}
-
-struct bpf_prog_priv {
- bool is_tp;
- char *sys_name;
- char *evt_name;
- struct perf_probe_event pev;
- bool need_prologue;
- struct bpf_insn *insns_buf;
- int nr_types;
- int *type_mapping;
- int *prologue_fds;
-};
-
-struct bpf_perf_object {
- struct list_head list;
- struct bpf_object *obj;
-};
-
-struct bpf_preproc_result {
- struct bpf_insn *new_insn_ptr;
- int new_insn_cnt;
-};
-
-static LIST_HEAD(bpf_objects_list);
-static struct hashmap *bpf_program_hash;
-static struct hashmap *bpf_map_hash;
-
-static struct bpf_perf_object *
-bpf_perf_object__next(struct bpf_perf_object *prev)
-{
- if (!prev) {
- if (list_empty(&bpf_objects_list))
- return NULL;
-
- return list_first_entry(&bpf_objects_list, struct bpf_perf_object, list);
- }
- if (list_is_last(&prev->list, &bpf_objects_list))
- return NULL;
-
- return list_next_entry(prev, list);
-}
-
-#define bpf_perf_object__for_each(perf_obj, tmp) \
- for ((perf_obj) = bpf_perf_object__next(NULL), \
- (tmp) = bpf_perf_object__next(perf_obj); \
- (perf_obj) != NULL; \
- (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
-
-static bool libbpf_initialized;
-static int libbpf_sec_handler;
-
-static int bpf_perf_object__add(struct bpf_object *obj)
-{
- struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));
-
- if (perf_obj) {
- INIT_LIST_HEAD(&perf_obj->list);
- perf_obj->obj = obj;
- list_add_tail(&perf_obj->list, &bpf_objects_list);
- }
- return perf_obj ? 0 : -ENOMEM;
-}
-
-static void *program_priv(const struct bpf_program *prog)
-{
- void *priv;
-
- if (IS_ERR_OR_NULL(bpf_program_hash))
- return NULL;
- if (!hashmap__find(bpf_program_hash, prog, &priv))
- return NULL;
- return priv;
-}
-
-static struct bpf_insn prologue_init_insn[] = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
-};
-
-static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
- struct bpf_prog_load_opts *opts __maybe_unused,
- long cookie __maybe_unused)
-{
- size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
- size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
- struct bpf_prog_priv *priv = program_priv(prog);
- const struct bpf_insn *orig_insn;
- struct bpf_insn *insn;
-
- if (IS_ERR_OR_NULL(priv)) {
- pr_debug("bpf: failed to get private field\n");
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
-
- if (!priv->need_prologue)
- return 0;
-
- /* prepend initialization code to program instructions */
- orig_insn = bpf_program__insns(prog);
- orig_insn_cnt = bpf_program__insn_cnt(prog);
- init_size = init_size_cnt * sizeof(*insn);
- orig_size = orig_insn_cnt * sizeof(*insn);
-
- insn_cnt = orig_insn_cnt + init_size_cnt;
- insn = malloc(insn_cnt * sizeof(*insn));
- if (!insn)
- return -ENOMEM;
-
- memcpy(insn, prologue_init_insn, init_size);
- memcpy((char *) insn + init_size, orig_insn, orig_size);
- bpf_program__set_insns(prog, insn, insn_cnt);
- return 0;
-}
-
-static int libbpf_init(void)
-{
- LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
- .prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
- );
-
- if (libbpf_initialized)
- return 0;
-
- libbpf_set_print(libbpf_perf_print);
- libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
- 0, &handler_opts);
- if (libbpf_sec_handler < 0) {
- pr_debug("bpf: failed to register libbpf section handler: %d\n",
- libbpf_sec_handler);
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
- libbpf_initialized = true;
- return 0;
-}
-
-struct bpf_object *
-bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
-{
- LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
- struct bpf_object *obj;
- int err;
-
- err = libbpf_init();
- if (err)
- return ERR_PTR(err);
-
- obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
- if (IS_ERR_OR_NULL(obj)) {
- pr_debug("bpf: failed to load buffer\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (bpf_perf_object__add(obj)) {
- bpf_object__close(obj);
- return ERR_PTR(-ENOMEM);
- }
-
- return obj;
-}
-
-static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
-{
- list_del(&perf_obj->list);
- bpf_object__close(perf_obj->obj);
- free(perf_obj);
-}
-
-struct bpf_object *bpf__prepare_load(const char *filename, bool source)
-{
- LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
- struct bpf_object *obj;
- int err;
-
- err = libbpf_init();
- if (err)
- return ERR_PTR(err);
-
- if (source) {
- void *obj_buf;
- size_t obj_buf_sz;
-
- perf_clang__init();
- err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
- perf_clang__cleanup();
- if (err) {
- pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
- err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
- if (err)
- return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
- } else
- pr_debug("bpf: successful builtin compilation\n");
- obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
-
- if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
- llvm__dump_obj(filename, obj_buf, obj_buf_sz);
-
- free(obj_buf);
- } else {
- obj = bpf_object__open(filename);
- }
-
- if (IS_ERR_OR_NULL(obj)) {
- pr_debug("bpf: failed to load %s\n", filename);
- return obj;
- }
-
- if (bpf_perf_object__add(obj)) {
- bpf_object__close(obj);
- return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
- }
-
- return obj;
-}
-
-static void close_prologue_programs(struct bpf_prog_priv *priv)
-{
- struct perf_probe_event *pev;
- int i, fd;
-
- if (!priv->need_prologue)
- return;
- pev = &priv->pev;
- for (i = 0; i < pev->ntevs; i++) {
- fd = priv->prologue_fds[i];
- if (fd != -1)
- close(fd);
- }
-}
-
-static void
-clear_prog_priv(const struct bpf_program *prog __maybe_unused,
- void *_priv)
-{
- struct bpf_prog_priv *priv = _priv;
-
- close_prologue_programs(priv);
- cleanup_perf_probe_events(&priv->pev, 1);
- zfree(&priv->insns_buf);
- zfree(&priv->prologue_fds);
- zfree(&priv->type_mapping);
- zfree(&priv->sys_name);
- zfree(&priv->evt_name);
- free(priv);
-}
-
-static void bpf_program_hash_free(void)
-{
- struct hashmap_entry *cur;
- size_t bkt;
-
- if (IS_ERR_OR_NULL(bpf_program_hash))
- return;
-
- hashmap__for_each_entry(bpf_program_hash, cur, bkt)
- clear_prog_priv(cur->pkey, cur->pvalue);
-
- hashmap__free(bpf_program_hash);
- bpf_program_hash = NULL;
-}
-
-static void bpf_map_hash_free(void);
-
-void bpf__clear(void)
-{
- struct bpf_perf_object *perf_obj, *tmp;
-
- bpf_perf_object__for_each(perf_obj, tmp) {
- bpf__unprobe(perf_obj->obj);
- bpf_perf_object__close(perf_obj);
- }
-
- bpf_program_hash_free();
- bpf_map_hash_free();
-}
-
-static size_t ptr_hash(const long __key, void *ctx __maybe_unused)
-{
- return __key;
-}
-
-static bool ptr_equal(long key1, long key2, void *ctx __maybe_unused)
-{
- return key1 == key2;
-}
-
-static int program_set_priv(struct bpf_program *prog, void *priv)
-{
- void *old_priv;
-
- /*
- * Should not happen, we warn about it in the
- * caller function - config_bpf_program
- */
- if (IS_ERR(bpf_program_hash))
- return PTR_ERR(bpf_program_hash);
-
- if (!bpf_program_hash) {
- bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
- if (IS_ERR(bpf_program_hash))
- return PTR_ERR(bpf_program_hash);
- }
-
- old_priv = program_priv(prog);
- if (old_priv) {
- clear_prog_priv(prog, old_priv);
- return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
- }
- return hashmap__add(bpf_program_hash, prog, priv);
-}
-
-static int
-prog_config__exec(const char *value, struct perf_probe_event *pev)
-{
- pev->uprobes = true;
- pev->target = strdup(value);
- if (!pev->target)
- return -ENOMEM;
- return 0;
-}
-
-static int
-prog_config__module(const char *value, struct perf_probe_event *pev)
-{
- pev->uprobes = false;
- pev->target = strdup(value);
- if (!pev->target)
- return -ENOMEM;
- return 0;
-}
-
-static int
-prog_config__bool(const char *value, bool *pbool, bool invert)
-{
- int err;
- bool bool_value;
-
- if (!pbool)
- return -EINVAL;
-
- err = strtobool(value, &bool_value);
- if (err)
- return err;
-
- *pbool = invert ? !bool_value : bool_value;
- return 0;
-}
-
-static int
-prog_config__inlines(const char *value,
- struct perf_probe_event *pev __maybe_unused)
-{
- return prog_config__bool(value, &probe_conf.no_inlines, true);
-}
-
-static int
-prog_config__force(const char *value,
- struct perf_probe_event *pev __maybe_unused)
-{
- return prog_config__bool(value, &probe_conf.force_add, false);
-}
-
-static struct {
- const char *key;
- const char *usage;
- const char *desc;
- int (*func)(const char *, struct perf_probe_event *);
-} bpf_prog_config_terms[] = {
- {
- .key = "exec",
- .usage = "exec=<full path of file>",
- .desc = "Set uprobe target",
- .func = prog_config__exec,
- },
- {
- .key = "module",
- .usage = "module=<module name> ",
- .desc = "Set kprobe module",
- .func = prog_config__module,
- },
- {
- .key = "inlines",
- .usage = "inlines=[yes|no] ",
- .desc = "Probe at inline symbol",
- .func = prog_config__inlines,
- },
- {
- .key = "force",
- .usage = "force=[yes|no] ",
- .desc = "Forcibly add events with existing name",
- .func = prog_config__force,
- },
-};
-
-static int
-do_prog_config(const char *key, const char *value,
- struct perf_probe_event *pev)
-{
- unsigned int i;
-
- pr_debug("config bpf program: %s=%s\n", key, value);
- for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
- if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
- return bpf_prog_config_terms[i].func(value, pev);
-
- pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
- key, value);
-
- pr_debug("\nHint: Valid options are:\n");
- for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
- pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
- bpf_prog_config_terms[i].desc);
- pr_debug("\n");
-
- return -BPF_LOADER_ERRNO__PROGCONF_TERM;
-}
-
-static const char *
-parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
-{
- char *text = strdup(config_str);
- char *sep, *line;
- const char *main_str = NULL;
- int err = 0;
-
- if (!text) {
- pr_debug("Not enough memory: dup config_str failed\n");
- return ERR_PTR(-ENOMEM);
- }
-
- line = text;
- while ((sep = strchr(line, ';'))) {
- char *equ;
-
- *sep = '\0';
- equ = strchr(line, '=');
- if (!equ) {
- pr_warning("WARNING: invalid config in BPF object: %s\n",
- line);
- pr_warning("\tShould be 'key=value'.\n");
- goto nextline;
- }
- *equ = '\0';
-
- err = do_prog_config(line, equ + 1, pev);
- if (err)
- break;
-nextline:
- line = sep + 1;
- }
-
- if (!err)
- main_str = config_str + (line - text);
- free(text);
-
- return err ? ERR_PTR(err) : main_str;
-}
-
-static int
-parse_prog_config(const char *config_str, const char **p_main_str,
- bool *is_tp, struct perf_probe_event *pev)
-{
- int err;
- const char *main_str = parse_prog_config_kvpair(config_str, pev);
-
- if (IS_ERR(main_str))
- return PTR_ERR(main_str);
-
- *p_main_str = main_str;
- if (!strchr(main_str, '=')) {
- /* Is a tracepoint event? */
- const char *s = strchr(main_str, ':');
-
- if (!s) {
- pr_debug("bpf: '%s' is not a valid tracepoint\n",
- config_str);
- return -BPF_LOADER_ERRNO__CONFIG;
- }
-
- *is_tp = true;
- return 0;
- }
-
- *is_tp = false;
- err = parse_perf_probe_command(main_str, pev);
- if (err < 0) {
- pr_debug("bpf: '%s' is not a valid config string\n",
- config_str);
- /* parse failed, don't need clear pev. */
- return -BPF_LOADER_ERRNO__CONFIG;
- }
- return 0;
-}
-
-static int
-config_bpf_program(struct bpf_program *prog)
-{
- struct perf_probe_event *pev = NULL;
- struct bpf_prog_priv *priv = NULL;
- const char *config_str, *main_str;
- bool is_tp = false;
- int err;
-
- /* Initialize per-program probing setting */
- probe_conf.no_inlines = false;
- probe_conf.force_add = false;
-
- priv = calloc(sizeof(*priv), 1);
- if (!priv) {
- pr_debug("bpf: failed to alloc priv\n");
- return -ENOMEM;
- }
- pev = &priv->pev;
-
- config_str = bpf_program__section_name(prog);
- pr_debug("bpf: config program '%s'\n", config_str);
- err = parse_prog_config(config_str, &main_str, &is_tp, pev);
- if (err)
- goto errout;
-
- if (is_tp) {
- char *s = strchr(main_str, ':');
-
- priv->is_tp = true;
- priv->sys_name = strndup(main_str, s - main_str);
- priv->evt_name = strdup(s + 1);
- goto set_priv;
- }
-
- if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
- pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
- config_str, PERF_BPF_PROBE_GROUP);
- err = -BPF_LOADER_ERRNO__GROUP;
- goto errout;
- } else if (!pev->group)
- pev->group = strdup(PERF_BPF_PROBE_GROUP);
-
- if (!pev->group) {
- pr_debug("bpf: strdup failed\n");
- err = -ENOMEM;
- goto errout;
- }
-
- if (!pev->event) {
- pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
- config_str);
- err = -BPF_LOADER_ERRNO__EVENTNAME;
- goto errout;
- }
- pr_debug("bpf: config '%s' is ok\n", config_str);
-
-set_priv:
- err = program_set_priv(prog, priv);
- if (err) {
- pr_debug("Failed to set priv for program '%s'\n", config_str);
- goto errout;
- }
-
- return 0;
-
-errout:
- if (pev)
- clear_perf_probe_event(pev);
- free(priv);
- return err;
-}
-
-static int bpf__prepare_probe(void)
-{
- static int err = 0;
- static bool initialized = false;
-
- /*
- * Make err static, so if init failed the first, bpf__prepare_probe()
- * fails each time without calling init_probe_symbol_maps multiple
- * times.
- */
- if (initialized)
- return err;
-
- initialized = true;
- err = init_probe_symbol_maps(false);
- if (err < 0)
- pr_debug("Failed to init_probe_symbol_maps\n");
- probe_conf.max_probes = MAX_PROBES;
- return err;
-}
-
-static int
-preproc_gen_prologue(struct bpf_program *prog, int n,
- const struct bpf_insn *orig_insns, int orig_insns_cnt,
- struct bpf_preproc_result *res)
-{
- struct bpf_prog_priv *priv = program_priv(prog);
- struct probe_trace_event *tev;
- struct perf_probe_event *pev;
- struct bpf_insn *buf;
- size_t prologue_cnt = 0;
- int i, err;
-
- if (IS_ERR_OR_NULL(priv) || priv->is_tp)
- goto errout;
-
- pev = &priv->pev;
-
- if (n < 0 || n >= priv->nr_types)
- goto errout;
-
- /* Find a tev belongs to that type */
- for (i = 0; i < pev->ntevs; i++) {
- if (priv->type_mapping[i] == n)
- break;
- }
-
- if (i >= pev->ntevs) {
- pr_debug("Internal error: prologue type %d not found\n", n);
- return -BPF_LOADER_ERRNO__PROLOGUE;
- }
-
- tev = &pev->tevs[i];
-
- buf = priv->insns_buf;
- err = bpf__gen_prologue(tev->args, tev->nargs,
- buf, &prologue_cnt,
- BPF_MAXINSNS - orig_insns_cnt);
- if (err) {
- const char *title;
-
- title = bpf_program__section_name(prog);
- pr_debug("Failed to generate prologue for program %s\n",
- title);
- return err;
- }
-
- memcpy(&buf[prologue_cnt], orig_insns,
- sizeof(struct bpf_insn) * orig_insns_cnt);
-
- res->new_insn_ptr = buf;
- res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
- return 0;
-
-errout:
- pr_debug("Internal error in preproc_gen_prologue\n");
- return -BPF_LOADER_ERRNO__PROLOGUE;
-}
-
-/*
- * compare_tev_args is reflexive, transitive and antisymmetric.
- * I can proof it but this margin is too narrow to contain.
- */
-static int compare_tev_args(const void *ptev1, const void *ptev2)
-{
- int i, ret;
- const struct probe_trace_event *tev1 =
- *(const struct probe_trace_event **)ptev1;
- const struct probe_trace_event *tev2 =
- *(const struct probe_trace_event **)ptev2;
-
- ret = tev2->nargs - tev1->nargs;
- if (ret)
- return ret;
-
- for (i = 0; i < tev1->nargs; i++) {
- struct probe_trace_arg *arg1, *arg2;
- struct probe_trace_arg_ref *ref1, *ref2;
-
- arg1 = &tev1->args[i];
- arg2 = &tev2->args[i];
-
- ret = strcmp(arg1->value, arg2->value);
- if (ret)
- return ret;
-
- ref1 = arg1->ref;
- ref2 = arg2->ref;
-
- while (ref1 && ref2) {
- ret = ref2->offset - ref1->offset;
- if (ret)
- return ret;
-
- ref1 = ref1->next;
- ref2 = ref2->next;
- }
-
- if (ref1 || ref2)
- return ref2 ? 1 : -1;
- }
-
- return 0;
-}
-
-/*
- * Assign a type number to each tevs in a pev.
- * mapping is an array with same slots as tevs in that pev.
- * nr_types will be set to number of types.
- */
-static int map_prologue(struct perf_probe_event *pev, int *mapping,
- int *nr_types)
-{
- int i, type = 0;
- struct probe_trace_event **ptevs;
-
- size_t array_sz = sizeof(*ptevs) * pev->ntevs;
-
- ptevs = malloc(array_sz);
- if (!ptevs) {
- pr_debug("Not enough memory: alloc ptevs failed\n");
- return -ENOMEM;
- }
-
- pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
- for (i = 0; i < pev->ntevs; i++)
- ptevs[i] = &pev->tevs[i];
-
- qsort(ptevs, pev->ntevs, sizeof(*ptevs),
- compare_tev_args);
-
- for (i = 0; i < pev->ntevs; i++) {
- int n;
-
- n = ptevs[i] - pev->tevs;
- if (i == 0) {
- mapping[n] = type;
- pr_debug("mapping[%d]=%d\n", n, type);
- continue;
- }
-
- if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
- mapping[n] = type;
- else
- mapping[n] = ++type;
-
- pr_debug("mapping[%d]=%d\n", n, mapping[n]);
- }
- free(ptevs);
- *nr_types = type + 1;
-
- return 0;
-}
-
-static int hook_load_preprocessor(struct bpf_program *prog)
-{
- struct bpf_prog_priv *priv = program_priv(prog);
- struct perf_probe_event *pev;
- bool need_prologue = false;
- int i;
-
- if (IS_ERR_OR_NULL(priv)) {
- pr_debug("Internal error when hook preprocessor\n");
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
-
- if (priv->is_tp) {
- priv->need_prologue = false;
- return 0;
- }
-
- pev = &priv->pev;
- for (i = 0; i < pev->ntevs; i++) {
- struct probe_trace_event *tev = &pev->tevs[i];
-
- if (tev->nargs > 0) {
- need_prologue = true;
- break;
- }
- }
-
- /*
- * Since all tevs don't have argument, we don't need generate
- * prologue.
- */
- if (!need_prologue) {
- priv->need_prologue = false;
- return 0;
- }
-
- priv->need_prologue = true;
- priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
- if (!priv->insns_buf) {
- pr_debug("Not enough memory: alloc insns_buf failed\n");
- return -ENOMEM;
- }
-
- priv->prologue_fds = malloc(sizeof(int) * pev->ntevs);
- if (!priv->prologue_fds) {
- pr_debug("Not enough memory: alloc prologue fds failed\n");
- return -ENOMEM;
- }
- memset(priv->prologue_fds, -1, sizeof(int) * pev->ntevs);
-
- priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
- if (!priv->type_mapping) {
- pr_debug("Not enough memory: alloc type_mapping failed\n");
- return -ENOMEM;
- }
- memset(priv->type_mapping, -1,
- sizeof(int) * pev->ntevs);
-
- return map_prologue(pev, priv->type_mapping, &priv->nr_types);
-}
-
-int bpf__probe(struct bpf_object *obj)
-{
- int err = 0;
- struct bpf_program *prog;
- struct bpf_prog_priv *priv;
- struct perf_probe_event *pev;
-
- err = bpf__prepare_probe();
- if (err) {
- pr_debug("bpf__prepare_probe failed\n");
- return err;
- }
-
- bpf_object__for_each_program(prog, obj) {
- err = config_bpf_program(prog);
- if (err)
- goto out;
-
- priv = program_priv(prog);
- if (IS_ERR_OR_NULL(priv)) {
- if (!priv)
- err = -BPF_LOADER_ERRNO__INTERNAL;
- else
- err = PTR_ERR(priv);
- goto out;
- }
-
- if (priv->is_tp) {
- bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
- continue;
- }
-
- bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
- pev = &priv->pev;
-
- err = convert_perf_probe_events(pev, 1);
- if (err < 0) {
- pr_debug("bpf_probe: failed to convert perf probe events\n");
- goto out;
- }
-
- err = apply_perf_probe_events(pev, 1);
- if (err < 0) {
- pr_debug("bpf_probe: failed to apply perf probe events\n");
- goto out;
- }
-
- /*
- * After probing, let's consider prologue, which
- * adds program fetcher to BPF programs.
- *
- * hook_load_preprocessor() hooks pre-processor
- * to bpf_program, let it generate prologue
- * dynamically during loading.
- */
- err = hook_load_preprocessor(prog);
- if (err)
- goto out;
- }
-out:
- return err < 0 ? err : 0;
-}
-
-#define EVENTS_WRITE_BUFSIZE 4096
-int bpf__unprobe(struct bpf_object *obj)
-{
- int err, ret = 0;
- struct bpf_program *prog;
-
- bpf_object__for_each_program(prog, obj) {
- struct bpf_prog_priv *priv = program_priv(prog);
- int i;
-
- if (IS_ERR_OR_NULL(priv) || priv->is_tp)
- continue;
-
- for (i = 0; i < priv->pev.ntevs; i++) {
- struct probe_trace_event *tev = &priv->pev.tevs[i];
- char name_buf[EVENTS_WRITE_BUFSIZE];
- struct strfilter *delfilter;
-
- snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
- "%s:%s", tev->group, tev->event);
- name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
-
- delfilter = strfilter__new(name_buf, NULL);
- if (!delfilter) {
- pr_debug("Failed to create filter for unprobing\n");
- ret = -ENOMEM;
- continue;
- }
-
- err = del_perf_probe_events(delfilter);
- strfilter__delete(delfilter);
- if (err) {
- pr_debug("Failed to delete %s\n", name_buf);
- ret = err;
- continue;
- }
- }
- }
- return ret;
-}
-
-static int bpf_object__load_prologue(struct bpf_object *obj)
-{
- int init_cnt = ARRAY_SIZE(prologue_init_insn);
- const struct bpf_insn *orig_insns;
- struct bpf_preproc_result res;
- struct perf_probe_event *pev;
- struct bpf_program *prog;
- int orig_insns_cnt;
-
- bpf_object__for_each_program(prog, obj) {
- struct bpf_prog_priv *priv = program_priv(prog);
- int err, i, fd;
-
- if (IS_ERR_OR_NULL(priv)) {
- pr_debug("bpf: failed to get private field\n");
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
-
- if (!priv->need_prologue)
- continue;
-
- /*
-		 * For each program that needs a prologue we do the following:
- *
- * - take its current instructions and use them
- * to generate the new code with prologue
- * - load new instructions with bpf_prog_load
- * and keep the fd in prologue_fds
- * - new fd will be used in bpf__foreach_event
- * to connect this program with perf evsel
- */
- orig_insns = bpf_program__insns(prog);
- orig_insns_cnt = bpf_program__insn_cnt(prog);
-
- pev = &priv->pev;
- for (i = 0; i < pev->ntevs; i++) {
- /*
-			 * Skipping artificial prologue_init_insn instructions
- * (init_cnt), so the prologue can be generated instead
- * of them.
- */
- err = preproc_gen_prologue(prog, i,
- orig_insns + init_cnt,
- orig_insns_cnt - init_cnt,
- &res);
- if (err)
- return err;
-
- fd = bpf_prog_load(bpf_program__get_type(prog),
- bpf_program__name(prog), "GPL",
- res.new_insn_ptr,
- res.new_insn_cnt, NULL);
- if (fd < 0) {
- char bf[128];
-
- libbpf_strerror(-errno, bf, sizeof(bf));
- pr_debug("bpf: load objects with prologue failed: err=%d: (%s)\n",
- -errno, bf);
- return -errno;
- }
- priv->prologue_fds[i] = fd;
- }
- /*
- * We no longer need the original program,
- * we can unload it.
- */
- bpf_program__unload(prog);
- }
- return 0;
-}
-
-int bpf__load(struct bpf_object *obj)
-{
- int err;
-
- err = bpf_object__load(obj);
- if (err) {
- char bf[128];
- libbpf_strerror(err, bf, sizeof(bf));
- pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
- return err;
- }
- return bpf_object__load_prologue(obj);
-}
-
-int bpf__foreach_event(struct bpf_object *obj,
- bpf_prog_iter_callback_t func,
- void *arg)
-{
- struct bpf_program *prog;
- int err;
-
- bpf_object__for_each_program(prog, obj) {
- struct bpf_prog_priv *priv = program_priv(prog);
- struct probe_trace_event *tev;
- struct perf_probe_event *pev;
- int i, fd;
-
- if (IS_ERR_OR_NULL(priv)) {
- pr_debug("bpf: failed to get private field\n");
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
-
- if (priv->is_tp) {
- fd = bpf_program__fd(prog);
- err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
- if (err) {
- pr_debug("bpf: tracepoint call back failed, stop iterate\n");
- return err;
- }
- continue;
- }
-
- pev = &priv->pev;
- for (i = 0; i < pev->ntevs; i++) {
- tev = &pev->tevs[i];
-
- if (priv->need_prologue)
- fd = priv->prologue_fds[i];
- else
- fd = bpf_program__fd(prog);
-
- if (fd < 0) {
- pr_debug("bpf: failed to get file descriptor\n");
- return fd;
- }
-
- err = (*func)(tev->group, tev->event, fd, obj, arg);
- if (err) {
- pr_debug("bpf: call back failed, stop iterate\n");
- return err;
- }
- }
- }
- return 0;
-}
-
-enum bpf_map_op_type {
- BPF_MAP_OP_SET_VALUE,
- BPF_MAP_OP_SET_EVSEL,
-};
-
-enum bpf_map_key_type {
- BPF_MAP_KEY_ALL,
- BPF_MAP_KEY_RANGES,
-};
-
-struct bpf_map_op {
- struct list_head list;
- enum bpf_map_op_type op_type;
- enum bpf_map_key_type key_type;
- union {
- struct parse_events_array array;
- } k;
- union {
- u64 value;
- struct evsel *evsel;
- } v;
-};
-
-struct bpf_map_priv {
- struct list_head ops_list;
-};
-
-static void
-bpf_map_op__delete(struct bpf_map_op *op)
-{
- if (!list_empty(&op->list))
- list_del_init(&op->list);
- if (op->key_type == BPF_MAP_KEY_RANGES)
- parse_events__clear_array(&op->k.array);
- free(op);
-}
-
-static void
-bpf_map_priv__purge(struct bpf_map_priv *priv)
-{
- struct bpf_map_op *pos, *n;
-
- list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
- list_del_init(&pos->list);
- bpf_map_op__delete(pos);
- }
-}
-
-static void
-bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
- void *_priv)
-{
- struct bpf_map_priv *priv = _priv;
-
- bpf_map_priv__purge(priv);
- free(priv);
-}
-
-static void *map_priv(const struct bpf_map *map)
-{
- void *priv;
-
- if (IS_ERR_OR_NULL(bpf_map_hash))
- return NULL;
- if (!hashmap__find(bpf_map_hash, map, &priv))
- return NULL;
- return priv;
-}
-
-static void bpf_map_hash_free(void)
-{
- struct hashmap_entry *cur;
- size_t bkt;
-
- if (IS_ERR_OR_NULL(bpf_map_hash))
- return;
-
- hashmap__for_each_entry(bpf_map_hash, cur, bkt)
- bpf_map_priv__clear(cur->pkey, cur->pvalue);
-
- hashmap__free(bpf_map_hash);
- bpf_map_hash = NULL;
-}
-
-static int map_set_priv(struct bpf_map *map, void *priv)
-{
- void *old_priv;
-
- if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
- return PTR_ERR(bpf_program_hash);
-
- if (!bpf_map_hash) {
- bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
- if (IS_ERR(bpf_map_hash))
- return PTR_ERR(bpf_map_hash);
- }
-
- old_priv = map_priv(map);
- if (old_priv) {
- bpf_map_priv__clear(map, old_priv);
- return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
- }
- return hashmap__add(bpf_map_hash, map, priv);
-}
-
-static int
-bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
-{
- op->key_type = BPF_MAP_KEY_ALL;
- if (!term)
- return 0;
-
- if (term->array.nr_ranges) {
- size_t memsz = term->array.nr_ranges *
- sizeof(op->k.array.ranges[0]);
-
- op->k.array.ranges = memdup(term->array.ranges, memsz);
- if (!op->k.array.ranges) {
- pr_debug("Not enough memory to alloc indices for map\n");
- return -ENOMEM;
- }
- op->key_type = BPF_MAP_KEY_RANGES;
- op->k.array.nr_ranges = term->array.nr_ranges;
- }
- return 0;
-}
-
-static struct bpf_map_op *
-bpf_map_op__new(struct parse_events_term *term)
-{
- struct bpf_map_op *op;
- int err;
-
- op = zalloc(sizeof(*op));
- if (!op) {
- pr_debug("Failed to alloc bpf_map_op\n");
- return ERR_PTR(-ENOMEM);
- }
- INIT_LIST_HEAD(&op->list);
-
- err = bpf_map_op_setkey(op, term);
- if (err) {
- free(op);
- return ERR_PTR(err);
- }
- return op;
-}
-
-static struct bpf_map_op *
-bpf_map_op__clone(struct bpf_map_op *op)
-{
- struct bpf_map_op *newop;
-
- newop = memdup(op, sizeof(*op));
- if (!newop) {
- pr_debug("Failed to alloc bpf_map_op\n");
- return NULL;
- }
-
- INIT_LIST_HEAD(&newop->list);
- if (op->key_type == BPF_MAP_KEY_RANGES) {
- size_t memsz = op->k.array.nr_ranges *
- sizeof(op->k.array.ranges[0]);
-
- newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
- if (!newop->k.array.ranges) {
- pr_debug("Failed to alloc indices for map\n");
- free(newop);
- return NULL;
- }
- }
-
- return newop;
-}
-
-static struct bpf_map_priv *
-bpf_map_priv__clone(struct bpf_map_priv *priv)
-{
- struct bpf_map_priv *newpriv;
- struct bpf_map_op *pos, *newop;
-
- newpriv = zalloc(sizeof(*newpriv));
- if (!newpriv) {
- pr_debug("Not enough memory to alloc map private\n");
- return NULL;
- }
- INIT_LIST_HEAD(&newpriv->ops_list);
-
- list_for_each_entry(pos, &priv->ops_list, list) {
- newop = bpf_map_op__clone(pos);
- if (!newop) {
- bpf_map_priv__purge(newpriv);
- return NULL;
- }
- list_add_tail(&newop->list, &newpriv->ops_list);
- }
-
- return newpriv;
-}
-
-static int
-bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
-{
- const char *map_name = bpf_map__name(map);
- struct bpf_map_priv *priv = map_priv(map);
-
- if (IS_ERR(priv)) {
- pr_debug("Failed to get private from map %s\n", map_name);
- return PTR_ERR(priv);
- }
-
- if (!priv) {
- priv = zalloc(sizeof(*priv));
- if (!priv) {
- pr_debug("Not enough memory to alloc map private\n");
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&priv->ops_list);
-
- if (map_set_priv(map, priv)) {
- free(priv);
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
- }
-
- list_add_tail(&op->list, &priv->ops_list);
- return 0;
-}
-
-static struct bpf_map_op *
-bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
-{
- struct bpf_map_op *op;
- int err;
-
- op = bpf_map_op__new(term);
- if (IS_ERR(op))
- return op;
-
- err = bpf_map__add_op(map, op);
- if (err) {
- bpf_map_op__delete(op);
- return ERR_PTR(err);
- }
- return op;
-}
-
-static int
-__bpf_map__config_value(struct bpf_map *map,
- struct parse_events_term *term)
-{
- struct bpf_map_op *op;
- const char *map_name = bpf_map__name(map);
-
- if (!map) {
- pr_debug("Map '%s' is invalid\n", map_name);
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
-
- if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
- pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
- map_name);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
- }
- if (bpf_map__key_size(map) < sizeof(unsigned int)) {
- pr_debug("Map %s has incorrect key size\n", map_name);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
- }
- switch (bpf_map__value_size(map)) {
- case 1:
- case 2:
- case 4:
- case 8:
- break;
- default:
- pr_debug("Map %s has incorrect value size\n", map_name);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
- }
-
- op = bpf_map__add_newop(map, term);
- if (IS_ERR(op))
- return PTR_ERR(op);
- op->op_type = BPF_MAP_OP_SET_VALUE;
- op->v.value = term->val.num;
- return 0;
-}
-
-static int
-bpf_map__config_value(struct bpf_map *map,
- struct parse_events_term *term,
- struct evlist *evlist __maybe_unused)
-{
- if (!term->err_val) {
- pr_debug("Config value not set\n");
- return -BPF_LOADER_ERRNO__OBJCONF_CONF;
- }
-
- if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
- pr_debug("ERROR: wrong value type for 'value'\n");
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
- }
-
- return __bpf_map__config_value(map, term);
-}
-
-static int
-__bpf_map__config_event(struct bpf_map *map,
- struct parse_events_term *term,
- struct evlist *evlist)
-{
- struct bpf_map_op *op;
- const char *map_name = bpf_map__name(map);
- struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
-
- if (!evsel) {
- pr_debug("Event (for '%s') '%s' doesn't exist\n",
- map_name, term->val.str);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
- }
-
- if (!map) {
- pr_debug("Map '%s' is invalid\n", map_name);
- return PTR_ERR(map);
- }
-
- /*
- * No need to check key_size and value_size:
- * kernel has already checked them.
- */
- if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
- pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
- map_name);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
- }
-
- op = bpf_map__add_newop(map, term);
- if (IS_ERR(op))
- return PTR_ERR(op);
- op->op_type = BPF_MAP_OP_SET_EVSEL;
- op->v.evsel = evsel;
- return 0;
-}
-
-static int
-bpf_map__config_event(struct bpf_map *map,
- struct parse_events_term *term,
- struct evlist *evlist)
-{
- if (!term->err_val) {
- pr_debug("Config value not set\n");
- return -BPF_LOADER_ERRNO__OBJCONF_CONF;
- }
-
- if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
- pr_debug("ERROR: wrong value type for 'event'\n");
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
- }
-
- return __bpf_map__config_event(map, term, evlist);
-}
-
-struct bpf_obj_config__map_func {
- const char *config_opt;
- int (*config_func)(struct bpf_map *, struct parse_events_term *,
- struct evlist *);
-};
-
-struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
- {"value", bpf_map__config_value},
- {"event", bpf_map__config_event},
-};
-
-static int
-config_map_indices_range_check(struct parse_events_term *term,
- struct bpf_map *map,
- const char *map_name)
-{
- struct parse_events_array *array = &term->array;
- unsigned int i;
-
- if (!array->nr_ranges)
- return 0;
- if (!array->ranges) {
- pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
- map_name, (int)array->nr_ranges);
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
-
- if (!map) {
- pr_debug("Map '%s' is invalid\n", map_name);
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
-
- for (i = 0; i < array->nr_ranges; i++) {
- unsigned int start = array->ranges[i].start;
- size_t length = array->ranges[i].length;
- unsigned int idx = start + length - 1;
-
- if (idx >= bpf_map__max_entries(map)) {
- pr_debug("ERROR: index %d too large\n", idx);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
- }
- }
- return 0;
-}
-
-static int
-bpf__obj_config_map(struct bpf_object *obj,
- struct parse_events_term *term,
- struct evlist *evlist,
- int *key_scan_pos)
-{
- /* key is "map:<mapname>.<config opt>" */
- char *map_name = strdup(term->config + sizeof("map:") - 1);
- struct bpf_map *map;
- int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
- char *map_opt;
- size_t i;
-
- if (!map_name)
- return -ENOMEM;
-
- map_opt = strchr(map_name, '.');
- if (!map_opt) {
- pr_debug("ERROR: Invalid map config: %s\n", map_name);
- goto out;
- }
-
- *map_opt++ = '\0';
- if (*map_opt == '\0') {
- pr_debug("ERROR: Invalid map option: %s\n", term->config);
- goto out;
- }
-
- map = bpf_object__find_map_by_name(obj, map_name);
- if (!map) {
- pr_debug("ERROR: Map %s doesn't exist\n", map_name);
- err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
- goto out;
- }
-
- *key_scan_pos += strlen(map_opt);
- err = config_map_indices_range_check(term, map, map_name);
- if (err)
- goto out;
- *key_scan_pos -= strlen(map_opt);
-
- for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
- struct bpf_obj_config__map_func *func =
- &bpf_obj_config__map_funcs[i];
-
- if (strcmp(map_opt, func->config_opt) == 0) {
- err = func->config_func(map, term, evlist);
- goto out;
- }
- }
-
- pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
- err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
-out:
- if (!err)
- *key_scan_pos += strlen(map_opt);
-
- free(map_name);
- return err;
-}
-
-int bpf__config_obj(struct bpf_object *obj,
- struct parse_events_term *term,
- struct evlist *evlist,
- int *error_pos)
-{
- int key_scan_pos = 0;
- int err;
-
- if (!obj || !term || !term->config)
- return -EINVAL;
-
- if (strstarts(term->config, "map:")) {
- key_scan_pos = sizeof("map:") - 1;
- err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
- goto out;
- }
- err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
-out:
- if (error_pos)
- *error_pos = key_scan_pos;
- return err;
-
-}
-
-typedef int (*map_config_func_t)(const char *name, int map_fd,
- const struct bpf_map *map,
- struct bpf_map_op *op,
- void *pkey, void *arg);
-
-static int
-foreach_key_array_all(map_config_func_t func,
- void *arg, const char *name,
- int map_fd, const struct bpf_map *map,
- struct bpf_map_op *op)
-{
- unsigned int i;
- int err;
-
- for (i = 0; i < bpf_map__max_entries(map); i++) {
- err = func(name, map_fd, map, op, &i, arg);
- if (err) {
- pr_debug("ERROR: failed to insert value to %s[%u]\n",
- name, i);
- return err;
- }
- }
- return 0;
-}
-
-static int
-foreach_key_array_ranges(map_config_func_t func, void *arg,
- const char *name, int map_fd,
- const struct bpf_map *map,
- struct bpf_map_op *op)
-{
- unsigned int i, j;
- int err;
-
- for (i = 0; i < op->k.array.nr_ranges; i++) {
- unsigned int start = op->k.array.ranges[i].start;
- size_t length = op->k.array.ranges[i].length;
-
- for (j = 0; j < length; j++) {
- unsigned int idx = start + j;
-
- err = func(name, map_fd, map, op, &idx, arg);
- if (err) {
- pr_debug("ERROR: failed to insert value to %s[%u]\n",
- name, idx);
- return err;
- }
- }
- }
- return 0;
-}
-
-static int
-bpf_map_config_foreach_key(struct bpf_map *map,
- map_config_func_t func,
- void *arg)
-{
- int err, map_fd, type;
- struct bpf_map_op *op;
- const char *name = bpf_map__name(map);
- struct bpf_map_priv *priv = map_priv(map);
-
- if (IS_ERR(priv)) {
- pr_debug("ERROR: failed to get private from map %s\n", name);
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
- if (!priv || list_empty(&priv->ops_list)) {
- pr_debug("INFO: nothing to config for map %s\n", name);
- return 0;
- }
-
- if (!map) {
- pr_debug("Map '%s' is invalid\n", name);
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
- map_fd = bpf_map__fd(map);
- if (map_fd < 0) {
- pr_debug("ERROR: failed to get fd from map %s\n", name);
- return map_fd;
- }
-
- type = bpf_map__type(map);
- list_for_each_entry(op, &priv->ops_list, list) {
- switch (type) {
- case BPF_MAP_TYPE_ARRAY:
- case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
- switch (op->key_type) {
- case BPF_MAP_KEY_ALL:
- err = foreach_key_array_all(func, arg, name,
- map_fd, map, op);
- break;
- case BPF_MAP_KEY_RANGES:
- err = foreach_key_array_ranges(func, arg, name,
- map_fd, map, op);
- break;
- default:
- pr_debug("ERROR: keytype for map '%s' invalid\n",
- name);
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
- if (err)
- return err;
- break;
- default:
- pr_debug("ERROR: type of '%s' incorrect\n", name);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
- }
- }
-
- return 0;
-}
-
-static int
-apply_config_value_for_key(int map_fd, void *pkey,
- size_t val_size, u64 val)
-{
- int err = 0;
-
- switch (val_size) {
- case 1: {
- u8 _val = (u8)(val);
- err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
- break;
- }
- case 2: {
- u16 _val = (u16)(val);
- err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
- break;
- }
- case 4: {
- u32 _val = (u32)(val);
- err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
- break;
- }
- case 8: {
- err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
- break;
- }
- default:
- pr_debug("ERROR: invalid value size\n");
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
- }
- if (err && errno)
- err = -errno;
- return err;
-}
-
-static int
-apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
- struct evsel *evsel)
-{
- struct xyarray *xy = evsel->core.fd;
- struct perf_event_attr *attr;
- unsigned int key, events;
- bool check_pass = false;
- int *evt_fd;
- int err;
-
- if (!xy) {
- pr_debug("ERROR: evsel not ready for map %s\n", name);
- return -BPF_LOADER_ERRNO__INTERNAL;
- }
-
- if (xy->row_size / xy->entry_size != 1) {
- pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
- name);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
- }
-
- attr = &evsel->core.attr;
- if (attr->inherit) {
- pr_debug("ERROR: Can't put inherit event into map %s\n", name);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
- }
-
- if (evsel__is_bpf_output(evsel))
- check_pass = true;
- if (attr->type == PERF_TYPE_RAW)
- check_pass = true;
- if (attr->type == PERF_TYPE_HARDWARE)
- check_pass = true;
- if (!check_pass) {
- pr_debug("ERROR: Event type is wrong for map %s\n", name);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
- }
-
- events = xy->entries / (xy->row_size / xy->entry_size);
- key = *((unsigned int *)pkey);
- if (key >= events) {
- pr_debug("ERROR: there is no event %d for map %s\n",
- key, name);
- return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
- }
- evt_fd = xyarray__entry(xy, key, 0);
- err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
- if (err && errno)
- err = -errno;
- return err;
-}
-
-static int
-apply_obj_config_map_for_key(const char *name, int map_fd,
- const struct bpf_map *map,
- struct bpf_map_op *op,
- void *pkey, void *arg __maybe_unused)
-{
- int err;
-
- switch (op->op_type) {
- case BPF_MAP_OP_SET_VALUE:
- err = apply_config_value_for_key(map_fd, pkey,
- bpf_map__value_size(map),
- op->v.value);
- break;
- case BPF_MAP_OP_SET_EVSEL:
- err = apply_config_evsel_for_key(name, map_fd, pkey,
- op->v.evsel);
- break;
- default:
- pr_debug("ERROR: unknown value type for '%s'\n", name);
- err = -BPF_LOADER_ERRNO__INTERNAL;
- }
- return err;
-}
-
-static int
-apply_obj_config_map(struct bpf_map *map)
-{
- return bpf_map_config_foreach_key(map,
- apply_obj_config_map_for_key,
- NULL);
-}
-
-static int
-apply_obj_config_object(struct bpf_object *obj)
-{
- struct bpf_map *map;
- int err;
-
- bpf_object__for_each_map(map, obj) {
- err = apply_obj_config_map(map);
- if (err)
- return err;
- }
- return 0;
-}
-
-int bpf__apply_obj_config(void)
-{
- struct bpf_perf_object *perf_obj, *tmp;
- int err;
-
- bpf_perf_object__for_each(perf_obj, tmp) {
- err = apply_obj_config_object(perf_obj->obj);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-#define bpf__perf_for_each_map(map, pobj, tmp) \
- bpf_perf_object__for_each(pobj, tmp) \
- bpf_object__for_each_map(map, pobj->obj)
-
-#define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name) \
- bpf__perf_for_each_map(map, pobj, pobjtmp) \
- if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))
-
-struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
-{
- struct bpf_map_priv *tmpl_priv = NULL;
- struct bpf_perf_object *perf_obj, *tmp;
- struct evsel *evsel = NULL;
- struct bpf_map *map;
- int err;
- bool need_init = false;
-
- bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
- struct bpf_map_priv *priv = map_priv(map);
-
- if (IS_ERR(priv))
- return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
-
- /*
- * No need to check map type: type should have been
- * verified by kernel.
- */
- if (!need_init && !priv)
- need_init = !priv;
- if (!tmpl_priv && priv)
- tmpl_priv = priv;
- }
-
- if (!need_init)
- return NULL;
-
- if (!tmpl_priv) {
- char *event_definition = NULL;
-
- if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
- return ERR_PTR(-ENOMEM);
-
- err = parse_event(evlist, event_definition);
- free(event_definition);
-
- if (err) {
- pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
- return ERR_PTR(-err);
- }
-
- evsel = evlist__last(evlist);
- }
-
- bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
- struct bpf_map_priv *priv = map_priv(map);
-
- if (IS_ERR(priv))
- return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
- if (priv)
- continue;
-
- if (tmpl_priv) {
- priv = bpf_map_priv__clone(tmpl_priv);
- if (!priv)
- return ERR_PTR(-ENOMEM);
-
- err = map_set_priv(map, priv);
- if (err) {
- bpf_map_priv__clear(map, priv);
- return ERR_PTR(err);
- }
- } else if (evsel) {
- struct bpf_map_op *op;
-
- op = bpf_map__add_newop(map, NULL);
- if (IS_ERR(op))
- return ERR_CAST(op);
- op->op_type = BPF_MAP_OP_SET_EVSEL;
- op->v.evsel = evsel;
- }
- }
-
- return evsel;
-}
-
-int bpf__setup_stdout(struct evlist *evlist)
-{
- struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
- return PTR_ERR_OR_ZERO(evsel);
-}
-
-#define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
-#define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
-#define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
-
-static const char *bpf_loader_strerror_table[NR_ERRNO] = {
- [ERRCODE_OFFSET(CONFIG)] = "Invalid config string",
- [ERRCODE_OFFSET(GROUP)] = "Invalid group name",
- [ERRCODE_OFFSET(EVENTNAME)] = "No event name found in config string",
- [ERRCODE_OFFSET(INTERNAL)] = "BPF loader internal error",
- [ERRCODE_OFFSET(COMPILE)] = "Error when compiling BPF scriptlet",
- [ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
- [ERRCODE_OFFSET(PROLOGUE)] = "Failed to generate prologue",
- [ERRCODE_OFFSET(PROLOGUE2BIG)] = "Prologue too big for program",
- [ERRCODE_OFFSET(PROLOGUEOOB)] = "Offset out of bound for prologue",
- [ERRCODE_OFFSET(OBJCONF_OPT)] = "Invalid object config option",
- [ERRCODE_OFFSET(OBJCONF_CONF)] = "Config value not set (missing '=')",
- [ERRCODE_OFFSET(OBJCONF_MAP_OPT)] = "Invalid object map config option",
- [ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)] = "Target map doesn't exist",
- [ERRCODE_OFFSET(OBJCONF_MAP_VALUE)] = "Incorrect value type for map",
- [ERRCODE_OFFSET(OBJCONF_MAP_TYPE)] = "Incorrect map type",
- [ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)] = "Incorrect map key size",
- [ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
- [ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)] = "Event not found for map setting",
- [ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)] = "Invalid map size for event setting",
- [ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)] = "Event dimension too large",
- [ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)] = "Doesn't support inherit event",
- [ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)] = "Wrong event type for map",
- [ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)] = "Index too large",
-};
-
-static int
-bpf_loader_strerror(int err, char *buf, size_t size)
-{
- char sbuf[STRERR_BUFSIZE];
- const char *msg;
-
- if (!buf || !size)
- return -1;
-
- err = err > 0 ? err : -err;
-
- if (err >= __LIBBPF_ERRNO__START)
- return libbpf_strerror(err, buf, size);
-
- if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
- msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
- snprintf(buf, size, "%s", msg);
- buf[size - 1] = '\0';
- return 0;
- }
-
- if (err >= __BPF_LOADER_ERRNO__END)
- snprintf(buf, size, "Unknown bpf loader error %d", err);
- else
- snprintf(buf, size, "%s",
- str_error_r(err, sbuf, sizeof(sbuf)));
-
- buf[size - 1] = '\0';
- return -1;
-}
-
-#define bpf__strerror_head(err, buf, size) \
- char sbuf[STRERR_BUFSIZE], *emsg;\
- if (!size)\
- return 0;\
- if (err < 0)\
- err = -err;\
- bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
- emsg = sbuf;\
- switch (err) {\
- default:\
- scnprintf(buf, size, "%s", emsg);\
- break;
-
-#define bpf__strerror_entry(val, fmt...)\
- case val: {\
- scnprintf(buf, size, fmt);\
- break;\
- }
-
-#define bpf__strerror_end(buf, size)\
- }\
- buf[size - 1] = '\0';
-
-int bpf__strerror_prepare_load(const char *filename, bool source,
- int err, char *buf, size_t size)
-{
- size_t n;
- int ret;
-
- n = snprintf(buf, size, "Failed to load %s%s: ",
- filename, source ? " from source" : "");
- if (n >= size) {
- buf[size - 1] = '\0';
- return 0;
- }
- buf += n;
- size -= n;
-
- ret = bpf_loader_strerror(err, buf, size);
- buf[size - 1] = '\0';
- return ret;
-}
-
-int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
- int err, char *buf, size_t size)
-{
- bpf__strerror_head(err, buf, size);
- case BPF_LOADER_ERRNO__PROGCONF_TERM: {
- scnprintf(buf, size, "%s (add -v to see detail)", emsg);
- break;
- }
- bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
- bpf__strerror_entry(EACCES, "You need to be root");
- bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
- bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
- bpf__strerror_end(buf, size);
- return 0;
-}
-
-int bpf__strerror_load(struct bpf_object *obj,
- int err, char *buf, size_t size)
-{
- bpf__strerror_head(err, buf, size);
- case LIBBPF_ERRNO__KVER: {
- unsigned int obj_kver = bpf_object__kversion(obj);
- unsigned int real_kver;
-
- if (fetch_kernel_version(&real_kver, NULL, 0)) {
- scnprintf(buf, size, "Unable to fetch kernel version");
- break;
- }
-
- if (obj_kver != real_kver) {
- scnprintf(buf, size,
- "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
- KVER_PARAM(obj_kver),
- KVER_PARAM(real_kver));
- break;
- }
-
- scnprintf(buf, size, "Failed to load program for unknown reason");
- break;
- }
- bpf__strerror_end(buf, size);
- return 0;
-}
-
-int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
- struct parse_events_term *term __maybe_unused,
- struct evlist *evlist __maybe_unused,
- int *error_pos __maybe_unused, int err,
- char *buf, size_t size)
-{
- bpf__strerror_head(err, buf, size);
- bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
- "Can't use this config term with this map type");
- bpf__strerror_end(buf, size);
- return 0;
-}
-
-int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
-{
- bpf__strerror_head(err, buf, size);
- bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
- "Cannot set event to BPF map in multi-thread tracing");
- bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
- "%s (Hint: use -i to turn off inherit)", emsg);
- bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
- "Can only put raw, hardware and BPF output event into a BPF map");
- bpf__strerror_end(buf, size);
- return 0;
-}
-
-int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
- int err, char *buf, size_t size)
-{
- bpf__strerror_head(err, buf, size);
- bpf__strerror_end(buf, size);
- return 0;
-}
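For reference, the removed apply_config_value_for_key() above narrows the 64-bit config value to the map's value size before calling bpf_map_update_elem(). A minimal stand-alone sketch of that pattern; the update_u32_slot() helper and the 4-byte ARRAY map value are illustrative assumptions, not part of the patch:

	#include <bpf/bpf.h>		/* bpf_map_update_elem(), BPF_ANY */
	#include <linux/types.h>

	/* Hypothetical helper mirroring the removed apply_config_value_for_key(). */
	static int update_u32_slot(int map_fd, unsigned int key, __u64 val)
	{
		/* map_fd is assumed to refer to an already loaded BPF_MAP_TYPE_ARRAY
		 * whose value size is 4 bytes, so the value is narrowed to match. */
		__u32 narrowed = (__u32)val;

		return bpf_map_update_elem(map_fd, &key, &narrowed, BPF_ANY);
	}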
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
deleted file mode 100644
index 5d1c725cea29..000000000000
--- a/tools/perf/util/bpf-loader.h
+++ /dev/null
@@ -1,216 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2015, Wang Nan <wangnan0@huawei.com>
- * Copyright (C) 2015, Huawei Inc.
- */
-#ifndef __BPF_LOADER_H
-#define __BPF_LOADER_H
-
-#include <linux/compiler.h>
-#include <linux/err.h>
-
-#ifdef HAVE_LIBBPF_SUPPORT
-#include <bpf/libbpf.h>
-
-enum bpf_loader_errno {
- __BPF_LOADER_ERRNO__START = __LIBBPF_ERRNO__START - 100,
- /* Invalid config string */
- BPF_LOADER_ERRNO__CONFIG = __BPF_LOADER_ERRNO__START,
- BPF_LOADER_ERRNO__GROUP, /* Invalid group name */
- BPF_LOADER_ERRNO__EVENTNAME, /* Event name is missing */
- BPF_LOADER_ERRNO__INTERNAL, /* BPF loader internal error */
- BPF_LOADER_ERRNO__COMPILE, /* Error when compiling BPF scriptlet */
- BPF_LOADER_ERRNO__PROGCONF_TERM,/* Invalid program config term in config string */
- BPF_LOADER_ERRNO__PROLOGUE, /* Failed to generate prologue */
- BPF_LOADER_ERRNO__PROLOGUE2BIG, /* Prologue too big for program */
- BPF_LOADER_ERRNO__PROLOGUEOOB, /* Offset out of bound for prologue */
- BPF_LOADER_ERRNO__OBJCONF_OPT, /* Invalid object config option */
-	BPF_LOADER_ERRNO__OBJCONF_CONF,	/* Config value not set (lost '=') */
-	BPF_LOADER_ERRNO__OBJCONF_MAP_OPT,	/* Invalid object map config option */
-	BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST,	/* Target map doesn't exist */
- BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE, /* Incorrect value type for map */
- BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE, /* Incorrect map type */
- BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE, /* Incorrect map key size */
- BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE,/* Incorrect map value size */
- BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT, /* Event not found for map setting */
- BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE, /* Invalid map size for event setting */
- BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM, /* Event dimension too large */
- BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH, /* Doesn't support inherit event */
- BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE, /* Wrong event type for map */
- BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG, /* Index too large */
- __BPF_LOADER_ERRNO__END,
-};
-#endif // HAVE_LIBBPF_SUPPORT
-
-struct evsel;
-struct evlist;
-struct bpf_object;
-struct parse_events_term;
-#define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
-
-typedef int (*bpf_prog_iter_callback_t)(const char *group, const char *event,
- int fd, struct bpf_object *obj, void *arg);
-
-#ifdef HAVE_LIBBPF_SUPPORT
-struct bpf_object *bpf__prepare_load(const char *filename, bool source);
-int bpf__strerror_prepare_load(const char *filename, bool source,
- int err, char *buf, size_t size);
-
-struct bpf_object *bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz,
- const char *name);
-
-void bpf__clear(void);
-
-int bpf__probe(struct bpf_object *obj);
-int bpf__unprobe(struct bpf_object *obj);
-int bpf__strerror_probe(struct bpf_object *obj, int err,
- char *buf, size_t size);
-
-int bpf__load(struct bpf_object *obj);
-int bpf__strerror_load(struct bpf_object *obj, int err,
- char *buf, size_t size);
-int bpf__foreach_event(struct bpf_object *obj,
- bpf_prog_iter_callback_t func, void *arg);
-
-int bpf__config_obj(struct bpf_object *obj, struct parse_events_term *term,
- struct evlist *evlist, int *error_pos);
-int bpf__strerror_config_obj(struct bpf_object *obj,
- struct parse_events_term *term,
- struct evlist *evlist,
- int *error_pos, int err, char *buf,
- size_t size);
-int bpf__apply_obj_config(void);
-int bpf__strerror_apply_obj_config(int err, char *buf, size_t size);
-
-int bpf__setup_stdout(struct evlist *evlist);
-struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name);
-int bpf__strerror_setup_output_event(struct evlist *evlist, int err, char *buf, size_t size);
-#else
-#include <errno.h>
-#include <string.h>
-#include "debug.h"
-
-static inline struct bpf_object *
-bpf__prepare_load(const char *filename __maybe_unused,
- bool source __maybe_unused)
-{
- pr_debug("ERROR: eBPF object loading is disabled during compiling.\n");
- return ERR_PTR(-ENOTSUP);
-}
-
-static inline struct bpf_object *
-bpf__prepare_load_buffer(void *obj_buf __maybe_unused,
- size_t obj_buf_sz __maybe_unused)
-{
- return ERR_PTR(-ENOTSUP);
-}
-
-static inline void bpf__clear(void) { }
-
-static inline int bpf__probe(struct bpf_object *obj __maybe_unused) { return 0;}
-static inline int bpf__unprobe(struct bpf_object *obj __maybe_unused) { return 0;}
-static inline int bpf__load(struct bpf_object *obj __maybe_unused) { return 0; }
-
-static inline int
-bpf__foreach_event(struct bpf_object *obj __maybe_unused,
- bpf_prog_iter_callback_t func __maybe_unused,
- void *arg __maybe_unused)
-{
- return 0;
-}
-
-static inline int
-bpf__config_obj(struct bpf_object *obj __maybe_unused,
- struct parse_events_term *term __maybe_unused,
- struct evlist *evlist __maybe_unused,
- int *error_pos __maybe_unused)
-{
- return 0;
-}
-
-static inline int
-bpf__apply_obj_config(void)
-{
- return 0;
-}
-
-static inline int
-bpf__setup_stdout(struct evlist *evlist __maybe_unused)
-{
- return 0;
-}
-
-static inline struct evsel *
-bpf__setup_output_event(struct evlist *evlist __maybe_unused, const char *name __maybe_unused)
-{
- return NULL;
-}
-
-static inline int
-__bpf_strerror(char *buf, size_t size)
-{
- if (!size)
- return 0;
- strncpy(buf,
- "ERROR: eBPF object loading is disabled during compiling.\n",
- size);
- buf[size - 1] = '\0';
- return 0;
-}
-
-static inline
-int bpf__strerror_prepare_load(const char *filename __maybe_unused,
- bool source __maybe_unused,
- int err __maybe_unused,
- char *buf, size_t size)
-{
- return __bpf_strerror(buf, size);
-}
-
-static inline int
-bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
- int err __maybe_unused,
- char *buf, size_t size)
-{
- return __bpf_strerror(buf, size);
-}
-
-static inline int bpf__strerror_load(struct bpf_object *obj __maybe_unused,
- int err __maybe_unused,
- char *buf, size_t size)
-{
- return __bpf_strerror(buf, size);
-}
-
-static inline int
-bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
- struct parse_events_term *term __maybe_unused,
- struct evlist *evlist __maybe_unused,
- int *error_pos __maybe_unused,
- int err __maybe_unused,
- char *buf, size_t size)
-{
- return __bpf_strerror(buf, size);
-}
-
-static inline int
-bpf__strerror_apply_obj_config(int err __maybe_unused,
- char *buf, size_t size)
-{
- return __bpf_strerror(buf, size);
-}
-
-static inline int
-bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
- int err __maybe_unused, char *buf, size_t size)
-{
- return __bpf_strerror(buf, size);
-}
-
-#endif
-
-static inline int bpf__strerror_setup_stdout(struct evlist *evlist, int err, char *buf, size_t size)
-{
- return bpf__strerror_setup_output_event(evlist, err, buf, size);
-}
-#endif
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index 9a03189d33d3..90ce22f9c1a9 100644
--- a/tools/perf/examples/bpf/augmented_raw_syscalls.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -2,22 +2,26 @@
/*
* Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
*
- * Test it with:
- *
- * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
- *
* This exactly matches what is marshalled into the raw_syscall:sys_enter
* payload expected by the 'perf trace' beautifiers.
- *
- * For now it just uses the existing tracepoint augmentation code in 'perf
- * trace', in the next csets we'll hook up these with the sys_enter/sys_exit
- * code that will combine entry/exit in a strace like way.
*/
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <linux/limits.h>
+/**
+ * is_power_of_2() - check if a value is a power of two
+ * @n: the value to check
+ *
+ * Determine whether some value is a power of two, where zero is *not*
+ * considered a power of two. Return: true if @n is a power of 2, otherwise
+ * false.
+ */
+#define is_power_of_2(n) (n != 0 && ((n & (n - 1)) == 0))
+
+#define MAX_CPUS 4096
+
// FIXME: These should come from system headers
typedef char bool;
typedef int pid_t;
@@ -34,7 +38,7 @@ struct __augmented_syscalls__ {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__type(key, int);
__type(value, __u32);
- __uint(max_entries, __NR_CPUS__);
+ __uint(max_entries, MAX_CPUS);
} __augmented_syscalls__ SEC(".maps");
/*
@@ -156,6 +160,7 @@ unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const
*/
if (string_len > 0) {
augmented_len -= sizeof(augmented_arg->value) - string_len;
+ _Static_assert(is_power_of_2(sizeof(augmented_arg->value)), "sizeof(augmented_arg->value) needs to be a power of two");
augmented_len &= sizeof(augmented_arg->value) - 1;
augmented_arg->size = string_len;
} else {
@@ -170,7 +175,7 @@ unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const
return augmented_len;
}
-SEC("!raw_syscalls:unaugmented")
+SEC("tp/raw_syscalls/sys_enter")
int syscall_unaugmented(struct syscall_enter_args *args)
{
return 1;
@@ -182,7 +187,7 @@ int syscall_unaugmented(struct syscall_enter_args *args)
* on from there, reading the first syscall arg as a string, i.e. open's
* filename.
*/
-SEC("!syscalls:sys_enter_connect")
+SEC("tp/syscalls/sys_enter_connect")
int sys_enter_connect(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
@@ -193,15 +198,15 @@ int sys_enter_connect(struct syscall_enter_args *args)
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
- if (socklen > sizeof(augmented_args->saddr))
- socklen = sizeof(augmented_args->saddr);
+ _Static_assert(is_power_of_2(sizeof(augmented_args->saddr)), "sizeof(augmented_args->saddr) needs to be a power of two");
+ socklen &= sizeof(augmented_args->saddr) - 1;
bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
-SEC("!syscalls:sys_enter_sendto")
+SEC("tp/syscalls/sys_enter_sendto")
int sys_enter_sendto(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
@@ -212,15 +217,14 @@ int sys_enter_sendto(struct syscall_enter_args *args)
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
- if (socklen > sizeof(augmented_args->saddr))
- socklen = sizeof(augmented_args->saddr);
+ socklen &= sizeof(augmented_args->saddr) - 1;
bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
-SEC("!syscalls:sys_enter_open")
+SEC("tp/syscalls/sys_enter_open")
int sys_enter_open(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
@@ -235,7 +239,7 @@ int sys_enter_open(struct syscall_enter_args *args)
return augmented__output(args, augmented_args, len);
}
-SEC("!syscalls:sys_enter_openat")
+SEC("tp/syscalls/sys_enter_openat")
int sys_enter_openat(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
@@ -250,7 +254,7 @@ int sys_enter_openat(struct syscall_enter_args *args)
return augmented__output(args, augmented_args, len);
}
-SEC("!syscalls:sys_enter_rename")
+SEC("tp/syscalls/sys_enter_rename")
int sys_enter_rename(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
@@ -267,7 +271,7 @@ int sys_enter_rename(struct syscall_enter_args *args)
return augmented__output(args, augmented_args, len);
}
-SEC("!syscalls:sys_enter_renameat")
+SEC("tp/syscalls/sys_enter_renameat")
int sys_enter_renameat(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
@@ -295,7 +299,7 @@ struct perf_event_attr_size {
__u32 size;
};
-SEC("!syscalls:sys_enter_perf_event_open")
+SEC("tp/syscalls/sys_enter_perf_event_open")
int sys_enter_perf_event_open(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
@@ -327,7 +331,7 @@ failure:
return 1; /* Failure: don't filter */
}
-SEC("!syscalls:sys_enter_clock_nanosleep")
+SEC("tp/syscalls/sys_enter_clock_nanosleep")
int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
@@ -358,7 +362,7 @@ static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
return bpf_map_lookup_elem(pids, &pid) != NULL;
}
-SEC("raw_syscalls:sys_enter")
+SEC("tp/raw_syscalls/sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args;
@@ -371,7 +375,6 @@ int sys_enter(struct syscall_enter_args *args)
* We'll add to this as we add augmented syscalls right after that
* initial, non-augmented raw_syscalls:sys_enter payload.
*/
- unsigned int len = sizeof(augmented_args->args);
if (pid_filter__has(&pids_filtered, getpid()))
return 0;
@@ -393,7 +396,7 @@ int sys_enter(struct syscall_enter_args *args)
return 0;
}
-SEC("raw_syscalls:sys_exit")
+SEC("tp/raw_syscalls/sys_exit")
int sys_exit(struct syscall_exit_args *args)
{
struct syscall_exit_args exit_args;
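In the sys_enter_connect() and sys_enter_sendto() hunks above, the old clamp (truncate socklen when it exceeds sizeof(saddr)) becomes a mask, socklen &= sizeof(saddr) - 1, guarded by _Static_assert(is_power_of_2(...)). For a power-of-two size that mask is equivalent to a modulo, so the copied length always stays inside the buffer. A small host-side sketch of the same check (the 128-byte size and 200-byte length are made-up values):

	#include <assert.h>
	#include <stdio.h>

	/* Same power-of-two test as the macro added above, with the argument
	 * parenthesized for stand-alone use. */
	#define is_power_of_2(n) ((n) != 0 && (((n) & ((n) - 1)) == 0))

	int main(void)
	{
		unsigned int size = 128;	/* e.g. sizeof(struct sockaddr_storage) */
		unsigned int socklen = 200;	/* userspace-controlled length */

		assert(is_power_of_2(size));

		/* Masking with size - 1 equals socklen % size only because size is a
		 * power of two, which is exactly what the _Static_assert enforces. */
		printf("%u\n", socklen & (size - 1));	/* prints 72 */
		printf("%u\n", socklen % size);		/* also prints 72 */
		return 0;
	}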
diff --git a/tools/perf/util/bpf_skel/bench_uprobe.bpf.c b/tools/perf/util/bpf_skel/bench_uprobe.bpf.c
new file mode 100644
index 000000000000..2c55896bb33c
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bench_uprobe.bpf.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2023 Red Hat
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+
+unsigned int nr_uprobes;
+
+SEC("uprobe")
+int BPF_UPROBE(empty)
+{
+ return 0;
+}
+
+SEC("uprobe")
+int BPF_UPROBE(trace_printk)
+{
+ char fmt[] = "perf bench uprobe %u";
+
+ bpf_trace_printk(fmt, sizeof(fmt), ++nr_uprobes);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 36728222a5b4..03c64b85383b 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -560,7 +560,7 @@ char *build_id_cache__cachedir(const char *sbuild_id, const char *name,
struct nsinfo *nsi, bool is_kallsyms,
bool is_vdso)
{
- char *realname = (char *)name, *filename;
+ char *realname = NULL, *filename;
bool slash = is_kallsyms || is_vdso;
if (!slash)
@@ -571,9 +571,7 @@ char *build_id_cache__cachedir(const char *sbuild_id, const char *name,
sbuild_id ? "/" : "", sbuild_id ?: "") < 0)
filename = NULL;
- if (!slash)
- free(realname);
-
+ free(realname);
return filename;
}
diff --git a/tools/perf/util/c++/Build b/tools/perf/util/c++/Build
deleted file mode 100644
index 613ecfd76527..000000000000
--- a/tools/perf/util/c++/Build
+++ /dev/null
@@ -1,2 +0,0 @@
-perf-$(CONFIG_CLANGLLVM) += clang.o
-perf-$(CONFIG_CLANGLLVM) += clang-test.o
diff --git a/tools/perf/util/c++/clang-c.h b/tools/perf/util/c++/clang-c.h
deleted file mode 100644
index d3731a876b6c..000000000000
--- a/tools/perf/util/c++/clang-c.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PERF_UTIL_CLANG_C_H
-#define PERF_UTIL_CLANG_C_H
-
-#include <stddef.h> /* for size_t */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef HAVE_LIBCLANGLLVM_SUPPORT
-extern void perf_clang__init(void);
-extern void perf_clang__cleanup(void);
-
-struct test_suite;
-extern int test__clang_to_IR(struct test_suite *test, int subtest);
-extern int test__clang_to_obj(struct test_suite *test, int subtest);
-
-extern int perf_clang__compile_bpf(const char *filename,
- void **p_obj_buf,
- size_t *p_obj_buf_sz);
-#else
-
-#include <errno.h>
-#include <linux/compiler.h> /* for __maybe_unused */
-
-static inline void perf_clang__init(void) { }
-static inline void perf_clang__cleanup(void) { }
-
-static inline int
-perf_clang__compile_bpf(const char *filename __maybe_unused,
- void **p_obj_buf __maybe_unused,
- size_t *p_obj_buf_sz __maybe_unused)
-{
- return -ENOTSUP;
-}
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/tools/perf/util/c++/clang-test.cpp b/tools/perf/util/c++/clang-test.cpp
deleted file mode 100644
index a4683ca53697..000000000000
--- a/tools/perf/util/c++/clang-test.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "clang.h"
-#include "clang-c.h"
-extern "C" {
-#include "../util.h"
-}
-#include "llvm/IR/Function.h"
-#include "llvm/IR/LLVMContext.h"
-
-#include <tests/llvm.h>
-#include <string>
-
-class perf_clang_scope {
-public:
- explicit perf_clang_scope() {perf_clang__init();}
- ~perf_clang_scope() {perf_clang__cleanup();}
-};
-
-static std::unique_ptr<llvm::Module>
-__test__clang_to_IR(void)
-{
- unsigned int kernel_version;
-
- if (fetch_kernel_version(&kernel_version, NULL, 0))
- return std::unique_ptr<llvm::Module>(nullptr);
-
- std::string cflag_kver("-DLINUX_VERSION_CODE=" +
- std::to_string(kernel_version));
-
- std::unique_ptr<llvm::Module> M =
- perf::getModuleFromSource({cflag_kver.c_str()},
- "perf-test.c",
- test_llvm__bpf_base_prog);
- return M;
-}
-
-extern "C" {
-int test__clang_to_IR(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
- perf_clang_scope _scope;
-
- auto M = __test__clang_to_IR();
- if (!M)
- return -1;
- for (llvm::Function& F : *M)
- if (F.getName() == "bpf_func__SyS_epoll_pwait")
- return 0;
- return -1;
-}
-
-int test__clang_to_obj(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
-{
- perf_clang_scope _scope;
-
- auto M = __test__clang_to_IR();
- if (!M)
- return -1;
-
- auto Buffer = perf::getBPFObjectFromModule(&*M);
- if (!Buffer)
- return -1;
- return 0;
-}
-
-}
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
deleted file mode 100644
index 1aad7d6d34aa..000000000000
--- a/tools/perf/util/c++/clang.cpp
+++ /dev/null
@@ -1,225 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * llvm C frontend for perf. Supports dynamically compiling C files.
- *
- * Inspired by clang example code:
- * http://llvm.org/svn/llvm-project/cfe/trunk/examples/clang-interpreter/main.cpp
- *
- * Copyright (C) 2016 Wang Nan <wangnan0@huawei.com>
- * Copyright (C) 2016 Huawei Inc.
- */
-
-#include "clang/Basic/Version.h"
-#include "clang/CodeGen/CodeGenAction.h"
-#include "clang/Frontend/CompilerInvocation.h"
-#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Frontend/TextDiagnosticPrinter.h"
-#include "clang/Tooling/Tooling.h"
-#include "llvm/IR/LegacyPassManager.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Option/Option.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/ManagedStatic.h"
-#if CLANG_VERSION_MAJOR >= 14
-#include "llvm/MC/TargetRegistry.h"
-#else
-#include "llvm/Support/TargetRegistry.h"
-#endif
-#include "llvm/Support/TargetSelect.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include <memory>
-
-#include "clang.h"
-#include "clang-c.h"
-
-namespace perf {
-
-static std::unique_ptr<llvm::LLVMContext> LLVMCtx;
-
-using namespace clang;
-
-static CompilerInvocation *
-createCompilerInvocation(llvm::opt::ArgStringList CFlags, StringRef& Path,
- DiagnosticsEngine& Diags)
-{
- llvm::opt::ArgStringList CCArgs {
- "-cc1",
- "-triple", "bpf-pc-linux",
- "-fsyntax-only",
- "-O2",
- "-nostdsysteminc",
- "-nobuiltininc",
- "-vectorize-loops",
- "-vectorize-slp",
- "-Wno-unused-value",
- "-Wno-pointer-sign",
- "-x", "c"};
-
- CCArgs.append(CFlags.begin(), CFlags.end());
- CompilerInvocation *CI = tooling::newInvocation(&Diags, CCArgs
-#if CLANG_VERSION_MAJOR >= 11
- ,/*BinaryName=*/nullptr
-#endif
- );
-
- FrontendOptions& Opts = CI->getFrontendOpts();
- Opts.Inputs.clear();
- Opts.Inputs.emplace_back(Path,
- FrontendOptions::getInputKindForExtension("c"));
- return CI;
-}
-
-static std::unique_ptr<llvm::Module>
-getModuleFromSource(llvm::opt::ArgStringList CFlags,
- StringRef Path, IntrusiveRefCntPtr<vfs::FileSystem> VFS)
-{
- CompilerInstance Clang;
- Clang.createDiagnostics();
-
-#if CLANG_VERSION_MAJOR < 9
- Clang.setVirtualFileSystem(&*VFS);
-#else
- Clang.createFileManager(&*VFS);
-#endif
-
-#if CLANG_VERSION_MAJOR < 4
- IntrusiveRefCntPtr<CompilerInvocation> CI =
- createCompilerInvocation(std::move(CFlags), Path,
- Clang.getDiagnostics());
- Clang.setInvocation(&*CI);
-#else
- std::shared_ptr<CompilerInvocation> CI(
- createCompilerInvocation(std::move(CFlags), Path,
- Clang.getDiagnostics()));
- Clang.setInvocation(CI);
-#endif
-
- std::unique_ptr<CodeGenAction> Act(new EmitLLVMOnlyAction(&*LLVMCtx));
- if (!Clang.ExecuteAction(*Act))
- return std::unique_ptr<llvm::Module>(nullptr);
-
- return Act->takeModule();
-}
-
-std::unique_ptr<llvm::Module>
-getModuleFromSource(llvm::opt::ArgStringList CFlags,
- StringRef Name, StringRef Content)
-{
- using namespace vfs;
-
- llvm::IntrusiveRefCntPtr<OverlayFileSystem> OverlayFS(
- new OverlayFileSystem(getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<InMemoryFileSystem> MemFS(
- new InMemoryFileSystem(true));
-
- /*
- * pushOverlay helps setting working dir for MemFS. Must call
- * before addFile.
- */
- OverlayFS->pushOverlay(MemFS);
- MemFS->addFile(Twine(Name), 0, llvm::MemoryBuffer::getMemBuffer(Content));
-
- return getModuleFromSource(std::move(CFlags), Name, OverlayFS);
-}
-
-std::unique_ptr<llvm::Module>
-getModuleFromSource(llvm::opt::ArgStringList CFlags, StringRef Path)
-{
- IntrusiveRefCntPtr<vfs::FileSystem> VFS(vfs::getRealFileSystem());
- return getModuleFromSource(std::move(CFlags), Path, VFS);
-}
-
-std::unique_ptr<llvm::SmallVectorImpl<char>>
-getBPFObjectFromModule(llvm::Module *Module)
-{
- using namespace llvm;
-
- std::string TargetTriple("bpf-pc-linux");
- std::string Error;
- const Target* Target = TargetRegistry::lookupTarget(TargetTriple, Error);
- if (!Target) {
- llvm::errs() << Error;
- return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
- }
-
- llvm::TargetOptions Opt;
- TargetMachine *TargetMachine =
- Target->createTargetMachine(TargetTriple,
- "generic", "",
- Opt, Reloc::Static);
-
- Module->setDataLayout(TargetMachine->createDataLayout());
- Module->setTargetTriple(TargetTriple);
-
- std::unique_ptr<SmallVectorImpl<char>> Buffer(new SmallVector<char, 0>());
- raw_svector_ostream ostream(*Buffer);
-
- legacy::PassManager PM;
- bool NotAdded;
- NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream
-#if CLANG_VERSION_MAJOR >= 7
- , /*DwoOut=*/nullptr
-#endif
-#if CLANG_VERSION_MAJOR < 10
- , TargetMachine::CGFT_ObjectFile
-#else
- , llvm::CGFT_ObjectFile
-#endif
- );
- if (NotAdded) {
- llvm::errs() << "TargetMachine can't emit a file of this type\n";
- return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
- }
- PM.run(*Module);
-
- return Buffer;
-}
-
-}
-
-extern "C" {
-void perf_clang__init(void)
-{
- perf::LLVMCtx.reset(new llvm::LLVMContext());
- LLVMInitializeBPFTargetInfo();
- LLVMInitializeBPFTarget();
- LLVMInitializeBPFTargetMC();
- LLVMInitializeBPFAsmPrinter();
-}
-
-void perf_clang__cleanup(void)
-{
- perf::LLVMCtx.reset(nullptr);
- llvm::llvm_shutdown();
-}
-
-int perf_clang__compile_bpf(const char *filename,
- void **p_obj_buf,
- size_t *p_obj_buf_sz)
-{
- using namespace perf;
-
- if (!p_obj_buf || !p_obj_buf_sz)
- return -EINVAL;
-
- llvm::opt::ArgStringList CFlags;
- auto M = getModuleFromSource(std::move(CFlags), filename);
- if (!M)
- return -EINVAL;
- auto O = getBPFObjectFromModule(&*M);
- if (!O)
- return -EINVAL;
-
- size_t size = O->size_in_bytes();
- void *buffer;
-
- buffer = malloc(size);
- if (!buffer)
- return -ENOMEM;
- memcpy(buffer, O->data(), size);
- *p_obj_buf = buffer;
- *p_obj_buf_sz = size;
- return 0;
-}
-}
diff --git a/tools/perf/util/c++/clang.h b/tools/perf/util/c++/clang.h
deleted file mode 100644
index 6ce33e22f23c..000000000000
--- a/tools/perf/util/c++/clang.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PERF_UTIL_CLANG_H
-#define PERF_UTIL_CLANG_H
-
-#include "llvm/ADT/StringRef.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Option/Option.h"
-#include <memory>
-
-namespace perf {
-
-using namespace llvm;
-
-std::unique_ptr<Module>
-getModuleFromSource(opt::ArgStringList CFlags,
- StringRef Name, StringRef Content);
-
-std::unique_ptr<Module>
-getModuleFromSource(opt::ArgStringList CFlags,
- StringRef Path);
-
-std::unique_ptr<llvm::SmallVectorImpl<char>>
-getBPFObjectFromModule(llvm::Module *Module);
-
-}
-#endif
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 46f144c46827..7a650de0db83 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -16,7 +16,6 @@
#include <subcmd/exec-cmd.h>
#include "util/event.h" /* proc_map_timeout */
#include "util/hist.h" /* perf_hist_config */
-#include "util/llvm-utils.h" /* perf_llvm_config */
#include "util/stat.h" /* perf_stat__set_big_num */
#include "util/evsel.h" /* evsel__hw_names, evsel__use_bpf_counters */
#include "util/srcline.h" /* addr2line_timeout_ms */
@@ -486,9 +485,6 @@ int perf_default_config(const char *var, const char *value,
if (strstarts(var, "call-graph."))
return perf_callchain_config(var, value);
- if (strstarts(var, "llvm."))
- return perf_llvm_config(var, value);
-
if (strstarts(var, "buildid."))
return perf_buildid_config(var, value);
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 1419b40dfbe8..9729d006550d 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -6,10 +6,11 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
-#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>
@@ -282,17 +283,6 @@ static int cs_etm__metadata_set_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
}
/*
- * FIELD_GET (linux/bitfield.h) not available outside kernel code,
- * and the header contains too many dependencies to just copy over,
- * so roll our own based on the original
- */
-#define __bf_shf(x) (__builtin_ffsll(x) - 1)
-#define FIELD_GET(_mask, _reg) \
- ({ \
- (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
- })
-
-/*
* Get a metadata for a specific cpu from an array.
*
*/
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index 46f74b2344db..1dbf27822ee2 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -10,6 +10,8 @@
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>
#include <linux/build_bug.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
#include "debug.h"
#include "event.h"
@@ -63,6 +65,7 @@ static void al_to_d_al(struct addr_location *al, struct perf_dlfilter_al *d_al)
d_al->addr = al->addr;
d_al->comm = NULL;
d_al->filtered = 0;
+ d_al->priv = NULL;
}
static struct addr_location *get_al(struct dlfilter *d)
@@ -151,6 +154,11 @@ static char **dlfilter__args(void *ctx, int *dlargc)
return d->dlargv;
}
+static bool has_priv(struct perf_dlfilter_al *d_al_p)
+{
+ return d_al_p->size >= offsetof(struct perf_dlfilter_al, priv) + sizeof(d_al_p->priv);
+}
+
static __s32 dlfilter__resolve_address(void *ctx, __u64 address, struct perf_dlfilter_al *d_al_p)
{
struct dlfilter *d = (struct dlfilter *)ctx;
@@ -166,6 +174,7 @@ static __s32 dlfilter__resolve_address(void *ctx, __u64 address, struct perf_dlf
if (!thread)
return -1;
+ addr_location__init(&al);
thread__find_symbol_fb(thread, d->sample->cpumode, address, &al);
al_to_d_al(&al, &d_al);
@@ -176,9 +185,31 @@ static __s32 dlfilter__resolve_address(void *ctx, __u64 address, struct perf_dlf
memcpy(d_al_p, &d_al, min((size_t)sz, sizeof(d_al)));
d_al_p->size = sz;
+ if (has_priv(d_al_p))
+ d_al_p->priv = memdup(&al, sizeof(al));
+ else /* Avoid leak for v0 API */
+ addr_location__exit(&al);
+
return 0;
}
+static void dlfilter__al_cleanup(void *ctx __maybe_unused, struct perf_dlfilter_al *d_al_p)
+{
+ struct addr_location *al;
+
+ /* Ensure backward compatibility */
+ if (!has_priv(d_al_p) || !d_al_p->priv)
+ return;
+
+ al = d_al_p->priv;
+
+ d_al_p->priv = NULL;
+
+ addr_location__exit(al);
+
+ free(al);
+}
+
static const __u8 *dlfilter__insn(void *ctx, __u32 *len)
{
struct dlfilter *d = (struct dlfilter *)ctx;
@@ -296,6 +327,7 @@ static const struct perf_dlfilter_fns perf_dlfilter_fns = {
.resolve_addr = dlfilter__resolve_addr,
.args = dlfilter__args,
.resolve_address = dlfilter__resolve_address,
+ .al_cleanup = dlfilter__al_cleanup,
.insn = dlfilter__insn,
.srcline = dlfilter__srcline,
.attr = dlfilter__attr,
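The has_priv() check added above gates access to the new 'priv' member on the size the dlfilter passed in, so filters built against the older, smaller perf_dlfilter_al layout keep working. A stand-alone sketch of that struct-size versioning idiom (the struct and field names here are hypothetical):

	#include <stdbool.h>
	#include <stddef.h>

	/* Hypothetical v1 layout: 'extra' was appended after the original fields. */
	struct api_rec {
		size_t size;	/* caller sets this to sizeof() of the layout it was built with */
		int v0_field;
		void *extra;	/* new in v1 */
	};

	/* Only touch 'extra' when the caller's struct is large enough to hold it. */
	static bool has_extra(const struct api_rec *r)
	{
		return r->size >= offsetof(struct api_rec, extra) + sizeof(r->extra);
	}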
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 9eabf3ec56e9..a164164001fb 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -324,11 +324,9 @@ int perf_env__read_pmu_mappings(struct perf_env *env)
u32 pmu_num = 0;
struct strbuf sb;
- while ((pmu = perf_pmus__scan(pmu))) {
- if (!pmu->name)
- continue;
+ while ((pmu = perf_pmus__scan(pmu)))
pmu_num++;
- }
+
if (!pmu_num) {
pr_debug("pmu mappings not available\n");
return -ENOENT;
@@ -339,8 +337,6 @@ int perf_env__read_pmu_mappings(struct perf_env *env)
return -ENOMEM;
while ((pmu = perf_pmus__scan(pmu))) {
- if (!pmu->name)
- continue;
if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
goto error;
/* include a NULL character at the end */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 4cbb092e0684..923c0fb15122 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -93,8 +93,8 @@ struct process_symbol_args {
u64 start;
};
-static int find_symbol_cb(void *arg, const char *name, char type,
- u64 start)
+static int find_func_symbol_cb(void *arg, const char *name, char type,
+ u64 start)
{
struct process_symbol_args *args = arg;
@@ -110,12 +110,36 @@ static int find_symbol_cb(void *arg, const char *name, char type,
return 1;
}
+static int find_any_symbol_cb(void *arg, const char *name,
+ char type __maybe_unused, u64 start)
+{
+ struct process_symbol_args *args = arg;
+
+ if (strcmp(name, args->name))
+ return 0;
+
+ args->start = start;
+ return 1;
+}
+
int kallsyms__get_function_start(const char *kallsyms_filename,
const char *symbol_name, u64 *addr)
{
struct process_symbol_args args = { .name = symbol_name, };
- if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+ if (kallsyms__parse(kallsyms_filename, &args, find_func_symbol_cb) <= 0)
+ return -1;
+
+ *addr = args.start;
+ return 0;
+}
+
+int kallsyms__get_symbol_start(const char *kallsyms_filename,
+ const char *symbol_name, u64 *addr)
+{
+ struct process_symbol_args args = { .name = symbol_name, };
+
+ if (kallsyms__parse(kallsyms_filename, &args, find_any_symbol_cb) <= 0)
return -1;
*addr = args.start;
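kallsyms__get_symbol_start() is added above because _edata is a data symbol: a callback that accepts only function symbol types never matches it, so a type-agnostic lookup is needed (with _etext kept as a fallback in the machine.c hunk further down). A toy illustration of the /proc/kallsyms line format ("address type name") and of why a function-type filter (roughly t/T plus weak w/W) skips _edata; these are made-up helpers, not the perf ones:

    #include <ctype.h>
    #include <stddef.h>
    #include <stdio.h>

    static int is_function_type(char type)
    {
        char t = tolower((unsigned char)type);

        return t == 't' || t == 'w';
    }

    int main(void)
    {
        const char *lines[] = {
            "ffffffff81000000 T _text",
            "ffffffff82a00000 D _edata",
        };

        for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
            unsigned long long addr;
            char type, name[64];

            if (sscanf(lines[i], "%llx %c %63s", &addr, &type, name) == 3)
                printf("%s: function=%d\n", name, is_function_type(type));
        }
        return 0;
    }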
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index de20e01c9d72..d8bcee2e9b93 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -360,6 +360,8 @@ size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FIL
int kallsyms__get_function_start(const char *kallsyms_filename,
const char *symbol_name, u64 *addr);
+int kallsyms__get_symbol_start(const char *kallsyms_filename,
+ const char *symbol_name, u64 *addr);
void event_attr_init(struct perf_event_attr *attr);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 762e2b2634a5..a8a5ff87cc1f 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -845,6 +845,7 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o
{
bool function = evsel__is_function_event(evsel);
struct perf_event_attr *attr = &evsel->core.attr;
+ const char *arch = perf_env__arch(evsel__env(evsel));
evsel__set_sample_bit(evsel, CALLCHAIN);
@@ -877,8 +878,9 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o
if (!function) {
evsel__set_sample_bit(evsel, REGS_USER);
evsel__set_sample_bit(evsel, STACK_USER);
- if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
- attr->sample_regs_user |= DWARF_MINIMAL_REGS;
+ if (opts->sample_user_regs &&
+ DWARF_MINIMAL_REGS(arch) != arch__user_reg_mask()) {
+ attr->sample_regs_user |= DWARF_MINIMAL_REGS(arch);
pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
"specifying a subset with --user-regs may render DWARF unwinding unreliable, "
"so the minimal registers set (IP, SP) is explicitly forced.\n");
@@ -1474,6 +1476,7 @@ void evsel__exit(struct evsel *evsel)
perf_thread_map__put(evsel->core.threads);
zfree(&evsel->group_name);
zfree(&evsel->name);
+ zfree(&evsel->filter);
zfree(&evsel->pmu_name);
zfree(&evsel->group_pmu_name);
zfree(&evsel->unit);
@@ -2826,9 +2829,6 @@ u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *n
{
struct tep_format_field *field = evsel__field(evsel, name);
- if (!field)
- return 0;
-
return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}
#endif
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index 4814262e3805..4488f306de78 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -10,9 +10,11 @@
#include "debug.h"
#include "evlist.h"
#include "expr.h"
-#include "expr-bison.h"
-#include "expr-flex.h"
+#include <util/expr-bison.h>
+#include <util/expr-flex.h>
#include "util/hashmap.h"
+#include "util/header.h"
+#include "util/pmu.h"
#include "smt.h"
#include "tsc.h"
#include <api/fs/fs.h>
@@ -425,6 +427,13 @@ double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx
result = cpu__max_present_cpu().cpu;
goto out;
}
+ if (!strcmp("#num_cpus_online", literal)) {
+ struct perf_cpu_map *online = cpu_map__online();
+
+ if (online)
+ result = perf_cpu_map__nr(online);
+ goto out;
+ }
if (!strcasecmp("#system_tsc_freq", literal)) {
result = arch_get_tsc_freq();
@@ -495,3 +504,19 @@ double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const
evlist__delete(tmp);
return ret;
}
+
+double expr__strcmp_cpuid_str(const struct expr_parse_ctx *ctx __maybe_unused,
+ bool compute_ids __maybe_unused, const char *test_id)
+{
+ double ret;
+ struct perf_pmu *pmu = pmu__find_core_pmu();
+ char *cpuid = perf_pmu__getcpuid(pmu);
+
+ if (!cpuid)
+ return NAN;
+
+ ret = !strcmp_cpuid_str(test_id, cpuid);
+
+ free(cpuid);
+ return ret;
+}
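The #num_cpus_online literal added to expr.c above is filled from perf's online CPU map (cpu_map__online()). As a rough standalone equivalent only, not the perf code path, the same count can be read with sysconf():

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* CPUs currently online; sysconf() returns -1 on error. */
        long online = sysconf(_SC_NPROCESSORS_ONLN);

        printf("#num_cpus_online would evaluate to %ld\n", online);
        return 0;
    }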
diff --git a/tools/perf/util/expr.h b/tools/perf/util/expr.h
index 3c1e49b3e35d..c0cec29ddc29 100644
--- a/tools/perf/util/expr.h
+++ b/tools/perf/util/expr.h
@@ -55,5 +55,6 @@ double expr_id_data__value(const struct expr_id_data *data);
double expr_id_data__source_count(const struct expr_id_data *data);
double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx);
double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const char *id);
+double expr__strcmp_cpuid_str(const struct expr_parse_ctx *ctx, bool compute_ids, const char *id);
#endif
diff --git a/tools/perf/util/expr.l b/tools/perf/util/expr.l
index dbb117414710..0feef0726c48 100644
--- a/tools/perf/util/expr.l
+++ b/tools/perf/util/expr.l
@@ -114,6 +114,7 @@ if { return IF; }
else { return ELSE; }
source_count { return SOURCE_COUNT; }
has_event { return HAS_EVENT; }
+strcmp_cpuid_str { return STRCMP_CPUID_STR; }
{literal} { return literal(yyscanner, sctx); }
{number} { return value(yyscanner); }
{symbol} { return str(yyscanner, ID, sctx->runtime); }
diff --git a/tools/perf/util/expr.y b/tools/perf/util/expr.y
index dd504afd8f36..6c93b358cc2d 100644
--- a/tools/perf/util/expr.y
+++ b/tools/perf/util/expr.y
@@ -7,6 +7,8 @@
#include "util/debug.h"
#define IN_EXPR_Y 1
#include "expr.h"
+#include "expr-bison.h"
+int expr_lex(YYSTYPE * yylval_param , void *yyscanner);
%}
%define api.pure full
@@ -37,7 +39,7 @@
} ids;
}
-%token ID NUMBER MIN MAX IF ELSE LITERAL D_RATIO SOURCE_COUNT HAS_EVENT EXPR_ERROR
+%token ID NUMBER MIN MAX IF ELSE LITERAL D_RATIO SOURCE_COUNT HAS_EVENT STRCMP_CPUID_STR EXPR_ERROR
%left MIN MAX IF
%left '|'
%left '^'
@@ -56,7 +58,7 @@
static void expr_error(double *final_val __maybe_unused,
struct expr_parse_ctx *ctx __maybe_unused,
bool compute_ids __maybe_unused,
- void *scanner,
+ void *scanner __maybe_unused,
const char *s)
{
pr_debug("%s\n", s);
@@ -205,6 +207,12 @@ expr: NUMBER
$$.ids = NULL;
free($3);
}
+| STRCMP_CPUID_STR '(' ID ')'
+{
+ $$.val = expr__strcmp_cpuid_str(ctx, compute_ids, $3);
+ $$.ids = NULL;
+ free($3);
+}
| expr '|' expr
{
if (is_const($1.val) && is_const($3.val)) {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 52fbf526fe74..d812e1e371a7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -456,6 +456,8 @@ static int write_cpudesc(struct feat_fd *ff,
#define CPUINFO_PROC { "Processor", }
#elif defined(__xtensa__)
#define CPUINFO_PROC { "core ID", }
+#elif defined(__loongarch__)
+#define CPUINFO_PROC { "Model Name", }
#else
#define CPUINFO_PROC { "model name", }
#endif
@@ -746,20 +748,14 @@ static int write_pmu_mappings(struct feat_fd *ff,
* Do a first pass to count number of pmu to avoid lseek so this
* works in pipe mode as well.
*/
- while ((pmu = perf_pmus__scan(pmu))) {
- if (!pmu->name)
- continue;
+ while ((pmu = perf_pmus__scan(pmu)))
pmu_num++;
- }
ret = do_write(ff, &pmu_num, sizeof(pmu_num));
if (ret < 0)
return ret;
while ((pmu = perf_pmus__scan(pmu))) {
- if (!pmu->name)
- continue;
-
ret = do_write(ff, &pmu->type, sizeof(pmu->type));
if (ret < 0)
return ret;
@@ -1605,8 +1601,15 @@ static int write_pmu_caps(struct feat_fd *ff,
int ret;
while ((pmu = perf_pmus__scan(pmu))) {
- if (!pmu->name || !strcmp(pmu->name, "cpu") ||
- perf_pmu__caps_parse(pmu) <= 0)
+ if (!strcmp(pmu->name, "cpu")) {
+ /*
+ * The "cpu" PMU is special and covered by
+ * HEADER_CPU_PMU_CAPS. Note, core PMUs are
+ * counted/written here for ARM, s390 and Intel hybrid.
+ */
+ continue;
+ }
+ if (perf_pmu__caps_parse(pmu) <= 0)
continue;
nr_pmu++;
}
@@ -1619,23 +1622,17 @@ static int write_pmu_caps(struct feat_fd *ff,
return 0;
/*
- * Write hybrid pmu caps first to maintain compatibility with
- * older perf tool.
+	 * Note: older perf tools assume core PMUs come first; this is a
+	 * property of perf_pmus__scan.
*/
- if (perf_pmus__num_core_pmus() > 1) {
- pmu = NULL;
- while ((pmu = perf_pmus__scan_core(pmu))) {
- ret = __write_pmu_caps(ff, pmu, true);
- if (ret < 0)
- return ret;
- }
- }
-
pmu = NULL;
while ((pmu = perf_pmus__scan(pmu))) {
- if (pmu->is_core || !pmu->nr_caps)
+ if (!strcmp(pmu->name, "cpu")) {
+ /* Skip as above. */
+ continue;
+ }
+ if (perf_pmu__caps_parse(pmu) <= 0)
continue;
-
ret = __write_pmu_caps(ff, pmu, true);
if (ret < 0)
return ret;
@@ -4381,7 +4378,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
- u32 i, ids, n_ids;
+ u32 i, n_ids;
+ u64 *ids;
struct evsel *evsel;
struct evlist *evlist = *pevlist;
@@ -4397,9 +4395,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
evlist__add(evlist, evsel);
- ids = event->header.size;
- ids -= (void *)&event->attr.id - (void *)event;
- n_ids = ids / sizeof(u64);
+ n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
+ n_ids = n_ids / sizeof(u64);
/*
* We don't have the cpu and thread maps on the header, so
* for allocating the perf_sample_id table we fake 1 cpu and
@@ -4408,8 +4405,9 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
return -ENOMEM;
+ ids = perf_record_header_attr_id(event);
for (i = 0; i < n_ids; i++) {
- perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
+ perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
}
return 0;
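The perf_event__process_attr() change above derives the id count from the record layout rather than from the offset of event->attr.id: a PERF_RECORD_HEADER_ATTR record is the event header, followed by a perf_event_attr whose own size field says how long it is, followed by the trailing u64 ids, which perf_record_header_attr_id() then locates. A small sketch of that arithmetic with hypothetical types:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the fixed event header (8 bytes, like perf_event_header). */
    struct example_header {
        uint32_t type;
        uint16_t misc;
        uint16_t size;
    };

    /* Whatever attr size the producer used, the id count follows from it. */
    static unsigned int example_n_ids(uint16_t event_size, uint32_t attr_size)
    {
        return (event_size - sizeof(struct example_header) - attr_size) /
               sizeof(uint64_t);
    }

    int main(void)
    {
        /* e.g. an 8-byte header, a 128-byte attr and two trailing ids */
        printf("n_ids = %u\n", example_n_ids(8 + 128 + 2 * 8, 128));
        return 0;
    }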
diff --git a/tools/perf/util/libunwind/arm64.c b/tools/perf/util/libunwind/arm64.c
index 014d82159656..37ecef0c53b9 100644
--- a/tools/perf/util/libunwind/arm64.c
+++ b/tools/perf/util/libunwind/arm64.c
@@ -18,8 +18,6 @@
* defined before including "unwind.h"
*/
#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arm64_reg_id(regnum)
-#define LIBUNWIND__ARCH_REG_IP PERF_REG_ARM64_PC
-#define LIBUNWIND__ARCH_REG_SP PERF_REG_ARM64_SP
#include "unwind.h"
#include "libunwind-aarch64.h"
diff --git a/tools/perf/util/libunwind/x86_32.c b/tools/perf/util/libunwind/x86_32.c
index b2b92d030aef..1697dece1b74 100644
--- a/tools/perf/util/libunwind/x86_32.c
+++ b/tools/perf/util/libunwind/x86_32.c
@@ -18,8 +18,6 @@
* defined before including "unwind.h"
*/
#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__x86_reg_id(regnum)
-#define LIBUNWIND__ARCH_REG_IP PERF_REG_X86_IP
-#define LIBUNWIND__ARCH_REG_SP PERF_REG_X86_SP
#include "unwind.h"
#include "libunwind-x86.h"
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
deleted file mode 100644
index c6c9c2228578..000000000000
--- a/tools/perf/util/llvm-utils.c
+++ /dev/null
@@ -1,612 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2015, Wang Nan <wangnan0@huawei.com>
- * Copyright (C) 2015, Huawei Inc.
- */
-
-#include <errno.h>
-#include <limits.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/zalloc.h>
-#include "debug.h"
-#include "llvm-utils.h"
-#include "config.h"
-#include "util.h"
-#include <sys/wait.h>
-#include <subcmd/exec-cmd.h>
-
-#define CLANG_BPF_CMD_DEFAULT_TEMPLATE \
- "$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\
- "-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \
- "$CLANG_OPTIONS $PERF_BPF_INC_OPTIONS $KERNEL_INC_OPTIONS " \
- "-Wno-unused-value -Wno-pointer-sign " \
- "-working-directory $WORKING_DIR " \
- "-c \"$CLANG_SOURCE\" --target=bpf $CLANG_EMIT_LLVM -g -O2 -o - $LLVM_OPTIONS_PIPE"
-
-struct llvm_param llvm_param = {
- .clang_path = "clang",
- .llc_path = "llc",
- .clang_bpf_cmd_template = CLANG_BPF_CMD_DEFAULT_TEMPLATE,
- .clang_opt = NULL,
- .opts = NULL,
- .kbuild_dir = NULL,
- .kbuild_opts = NULL,
- .user_set_param = false,
-};
-
-static void version_notice(void);
-
-int perf_llvm_config(const char *var, const char *value)
-{
- if (!strstarts(var, "llvm."))
- return 0;
- var += sizeof("llvm.") - 1;
-
- if (!strcmp(var, "clang-path"))
- llvm_param.clang_path = strdup(value);
- else if (!strcmp(var, "clang-bpf-cmd-template"))
- llvm_param.clang_bpf_cmd_template = strdup(value);
- else if (!strcmp(var, "clang-opt"))
- llvm_param.clang_opt = strdup(value);
- else if (!strcmp(var, "kbuild-dir"))
- llvm_param.kbuild_dir = strdup(value);
- else if (!strcmp(var, "kbuild-opts"))
- llvm_param.kbuild_opts = strdup(value);
- else if (!strcmp(var, "dump-obj"))
- llvm_param.dump_obj = !!perf_config_bool(var, value);
- else if (!strcmp(var, "opts"))
- llvm_param.opts = strdup(value);
- else {
- pr_debug("Invalid LLVM config option: %s\n", value);
- return -1;
- }
- llvm_param.user_set_param = true;
- return 0;
-}
-
-static int
-search_program(const char *def, const char *name,
- char *output)
-{
- char *env, *path, *tmp = NULL;
- char buf[PATH_MAX];
- int ret;
-
- output[0] = '\0';
- if (def && def[0] != '\0') {
- if (def[0] == '/') {
- if (access(def, F_OK) == 0) {
- strlcpy(output, def, PATH_MAX);
- return 0;
- }
- } else if (def[0] != '\0')
- name = def;
- }
-
- env = getenv("PATH");
- if (!env)
- return -1;
- env = strdup(env);
- if (!env)
- return -1;
-
- ret = -ENOENT;
- path = strtok_r(env, ":", &tmp);
- while (path) {
- scnprintf(buf, sizeof(buf), "%s/%s", path, name);
- if (access(buf, F_OK) == 0) {
- strlcpy(output, buf, PATH_MAX);
- ret = 0;
- break;
- }
- path = strtok_r(NULL, ":", &tmp);
- }
-
- free(env);
- return ret;
-}
-
-static int search_program_and_warn(const char *def, const char *name,
- char *output)
-{
- int ret = search_program(def, name, output);
-
- if (ret) {
- pr_err("ERROR:\tunable to find %s.\n"
- "Hint:\tTry to install latest clang/llvm to support BPF. Check your $PATH\n"
- " \tand '%s-path' option in [llvm] section of ~/.perfconfig.\n",
- name, name);
- version_notice();
- }
- return ret;
-}
-
-#define READ_SIZE 4096
-static int
-read_from_pipe(const char *cmd, void **p_buf, size_t *p_read_sz)
-{
- int err = 0;
- void *buf = NULL;
- FILE *file = NULL;
- size_t read_sz = 0, buf_sz = 0;
- char serr[STRERR_BUFSIZE];
-
- file = popen(cmd, "r");
- if (!file) {
- pr_err("ERROR: unable to popen cmd: %s\n",
- str_error_r(errno, serr, sizeof(serr)));
- return -EINVAL;
- }
-
- while (!feof(file) && !ferror(file)) {
- /*
- * Make buf_sz always have obe byte extra space so we
- * can put '\0' there.
- */
- if (buf_sz - read_sz < READ_SIZE + 1) {
- void *new_buf;
-
- buf_sz = read_sz + READ_SIZE + 1;
- new_buf = realloc(buf, buf_sz);
-
- if (!new_buf) {
- pr_err("ERROR: failed to realloc memory\n");
- err = -ENOMEM;
- goto errout;
- }
-
- buf = new_buf;
- }
- read_sz += fread(buf + read_sz, 1, READ_SIZE, file);
- }
-
- if (buf_sz - read_sz < 1) {
- pr_err("ERROR: internal error\n");
- err = -EINVAL;
- goto errout;
- }
-
- if (ferror(file)) {
- pr_err("ERROR: error occurred when reading from pipe: %s\n",
- str_error_r(errno, serr, sizeof(serr)));
- err = -EIO;
- goto errout;
- }
-
- err = WEXITSTATUS(pclose(file));
- file = NULL;
- if (err) {
- err = -EINVAL;
- goto errout;
- }
-
- /*
- * If buf is string, give it terminal '\0' to make our life
- * easier. If buf is not string, that '\0' is out of space
- * indicated by read_sz so caller won't even notice it.
- */
- ((char *)buf)[read_sz] = '\0';
-
- if (!p_buf)
- free(buf);
- else
- *p_buf = buf;
-
- if (p_read_sz)
- *p_read_sz = read_sz;
- return 0;
-
-errout:
- if (file)
- pclose(file);
- free(buf);
- if (p_buf)
- *p_buf = NULL;
- if (p_read_sz)
- *p_read_sz = 0;
- return err;
-}
-
-static inline void
-force_set_env(const char *var, const char *value)
-{
- if (value) {
- setenv(var, value, 1);
- pr_debug("set env: %s=%s\n", var, value);
- } else {
- unsetenv(var);
- pr_debug("unset env: %s\n", var);
- }
-}
-
-static void
-version_notice(void)
-{
- pr_err(
-" \tLLVM 3.7 or newer is required. Which can be found from http://llvm.org\n"
-" \tYou may want to try git trunk:\n"
-" \t\tgit clone http://llvm.org/git/llvm.git\n"
-" \t\t and\n"
-" \t\tgit clone http://llvm.org/git/clang.git\n\n"
-" \tOr fetch the latest clang/llvm 3.7 from pre-built llvm packages for\n"
-" \tdebian/ubuntu:\n"
-" \t\thttps://apt.llvm.org/\n\n"
-" \tIf you are using old version of clang, change 'clang-bpf-cmd-template'\n"
-" \toption in [llvm] section of ~/.perfconfig to:\n\n"
-" \t \"$CLANG_EXEC $CLANG_OPTIONS $KERNEL_INC_OPTIONS $PERF_BPF_INC_OPTIONS \\\n"
-" \t -working-directory $WORKING_DIR -c $CLANG_SOURCE \\\n"
-" \t -emit-llvm -o - | /path/to/llc -march=bpf -filetype=obj -o -\"\n"
-" \t(Replace /path/to/llc with path to your llc)\n\n"
-);
-}
-
-static int detect_kbuild_dir(char **kbuild_dir)
-{
- const char *test_dir = llvm_param.kbuild_dir;
- const char *prefix_dir = "";
- const char *suffix_dir = "";
-
- /* _UTSNAME_LENGTH is 65 */
- char release[128];
-
- char *autoconf_path;
-
- int err;
-
- if (!test_dir) {
- err = fetch_kernel_version(NULL, release,
- sizeof(release));
- if (err)
- return -EINVAL;
-
- test_dir = release;
- prefix_dir = "/lib/modules/";
- suffix_dir = "/build";
- }
-
- err = asprintf(&autoconf_path, "%s%s%s/include/generated/autoconf.h",
- prefix_dir, test_dir, suffix_dir);
- if (err < 0)
- return -ENOMEM;
-
- if (access(autoconf_path, R_OK) == 0) {
- free(autoconf_path);
-
- err = asprintf(kbuild_dir, "%s%s%s", prefix_dir, test_dir,
- suffix_dir);
- if (err < 0)
- return -ENOMEM;
- return 0;
- }
- pr_debug("%s: Couldn't find \"%s\", missing kernel-devel package?.\n",
- __func__, autoconf_path);
- free(autoconf_path);
- return -ENOENT;
-}
-
-static const char *kinc_fetch_script =
-"#!/usr/bin/env sh\n"
-"if ! test -d \"$KBUILD_DIR\"\n"
-"then\n"
-" exit 1\n"
-"fi\n"
-"if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n"
-"then\n"
-" exit 1\n"
-"fi\n"
-"TMPDIR=`mktemp -d`\n"
-"if test -z \"$TMPDIR\"\n"
-"then\n"
-" exit 1\n"
-"fi\n"
-"cat << EOF > $TMPDIR/Makefile\n"
-"obj-y := dummy.o\n"
-"\\$(obj)/%.o: \\$(src)/%.c\n"
-"\t@echo -n \"\\$(NOSTDINC_FLAGS) \\$(LINUXINCLUDE) \\$(EXTRA_CFLAGS)\"\n"
-"\t\\$(CC) -c -o \\$@ \\$<\n"
-"EOF\n"
-"touch $TMPDIR/dummy.c\n"
-"make -s -C $KBUILD_DIR M=$TMPDIR $KBUILD_OPTS dummy.o 2>/dev/null\n"
-"RET=$?\n"
-"rm -rf $TMPDIR\n"
-"exit $RET\n";
-
-void llvm__get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts)
-{
- static char *saved_kbuild_dir;
- static char *saved_kbuild_include_opts;
- int err;
-
- if (!kbuild_dir || !kbuild_include_opts)
- return;
-
- *kbuild_dir = NULL;
- *kbuild_include_opts = NULL;
-
- if (saved_kbuild_dir && saved_kbuild_include_opts &&
- !IS_ERR(saved_kbuild_dir) && !IS_ERR(saved_kbuild_include_opts)) {
- *kbuild_dir = strdup(saved_kbuild_dir);
- *kbuild_include_opts = strdup(saved_kbuild_include_opts);
-
- if (*kbuild_dir && *kbuild_include_opts)
- return;
-
- zfree(kbuild_dir);
- zfree(kbuild_include_opts);
- /*
- * Don't fall through: it may breaks saved_kbuild_dir and
- * saved_kbuild_include_opts if detect them again when
- * memory is low.
- */
- return;
- }
-
- if (llvm_param.kbuild_dir && !llvm_param.kbuild_dir[0]) {
- pr_debug("[llvm.kbuild-dir] is set to \"\" deliberately.\n");
- pr_debug("Skip kbuild options detection.\n");
- goto errout;
- }
-
- err = detect_kbuild_dir(kbuild_dir);
- if (err) {
- pr_warning(
-"WARNING:\tunable to get correct kernel building directory.\n"
-"Hint:\tSet correct kbuild directory using 'kbuild-dir' option in [llvm]\n"
-" \tsection of ~/.perfconfig or set it to \"\" to suppress kbuild\n"
-" \tdetection.\n\n");
- goto errout;
- }
-
- pr_debug("Kernel build dir is set to %s\n", *kbuild_dir);
- force_set_env("KBUILD_DIR", *kbuild_dir);
- force_set_env("KBUILD_OPTS", llvm_param.kbuild_opts);
- err = read_from_pipe(kinc_fetch_script,
- (void **)kbuild_include_opts,
- NULL);
- if (err) {
- pr_warning(
-"WARNING:\tunable to get kernel include directories from '%s'\n"
-"Hint:\tTry set clang include options using 'clang-bpf-cmd-template'\n"
-" \toption in [llvm] section of ~/.perfconfig and set 'kbuild-dir'\n"
-" \toption in [llvm] to \"\" to suppress this detection.\n\n",
- *kbuild_dir);
-
- zfree(kbuild_dir);
- goto errout;
- }
-
- pr_debug("include option is set to %s\n", *kbuild_include_opts);
-
- saved_kbuild_dir = strdup(*kbuild_dir);
- saved_kbuild_include_opts = strdup(*kbuild_include_opts);
-
- if (!saved_kbuild_dir || !saved_kbuild_include_opts) {
- zfree(&saved_kbuild_dir);
- zfree(&saved_kbuild_include_opts);
- }
- return;
-errout:
- saved_kbuild_dir = ERR_PTR(-EINVAL);
- saved_kbuild_include_opts = ERR_PTR(-EINVAL);
-}
-
-int llvm__get_nr_cpus(void)
-{
- static int nr_cpus_avail = 0;
- char serr[STRERR_BUFSIZE];
-
- if (nr_cpus_avail > 0)
- return nr_cpus_avail;
-
- nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF);
- if (nr_cpus_avail <= 0) {
- pr_err(
-"WARNING:\tunable to get available CPUs in this system: %s\n"
-" \tUse 128 instead.\n", str_error_r(errno, serr, sizeof(serr)));
- nr_cpus_avail = 128;
- }
- return nr_cpus_avail;
-}
-
-void llvm__dump_obj(const char *path, void *obj_buf, size_t size)
-{
- char *obj_path = strdup(path);
- FILE *fp;
- char *p;
-
- if (!obj_path) {
- pr_warning("WARNING: Not enough memory, skip object dumping\n");
- return;
- }
-
- p = strrchr(obj_path, '.');
- if (!p || (strcmp(p, ".c") != 0)) {
- pr_warning("WARNING: invalid llvm source path: '%s', skip object dumping\n",
- obj_path);
- goto out;
- }
-
- p[1] = 'o';
- fp = fopen(obj_path, "wb");
- if (!fp) {
- pr_warning("WARNING: failed to open '%s': %s, skip object dumping\n",
- obj_path, strerror(errno));
- goto out;
- }
-
- pr_debug("LLVM: dumping %s\n", obj_path);
- if (fwrite(obj_buf, size, 1, fp) != 1)
- pr_debug("WARNING: failed to write to file '%s': %s, skip object dumping\n", obj_path, strerror(errno));
- fclose(fp);
-out:
- free(obj_path);
-}
-
-int llvm__compile_bpf(const char *path, void **p_obj_buf,
- size_t *p_obj_buf_sz)
-{
- size_t obj_buf_sz;
- void *obj_buf = NULL;
- int err, nr_cpus_avail;
- unsigned int kernel_version;
- char linux_version_code_str[64];
- const char *clang_opt = llvm_param.clang_opt;
- char clang_path[PATH_MAX], llc_path[PATH_MAX], abspath[PATH_MAX], nr_cpus_avail_str[64];
- char serr[STRERR_BUFSIZE];
- char *kbuild_dir = NULL, *kbuild_include_opts = NULL,
- *perf_bpf_include_opts = NULL;
- const char *template = llvm_param.clang_bpf_cmd_template;
- char *pipe_template = NULL;
- const char *opts = llvm_param.opts;
- char *command_echo = NULL, *command_out;
- char *libbpf_include_dir = system_path(LIBBPF_INCLUDE_DIR);
-
- if (path[0] != '-' && realpath(path, abspath) == NULL) {
- err = errno;
- pr_err("ERROR: problems with path %s: %s\n",
- path, str_error_r(err, serr, sizeof(serr)));
- return -err;
- }
-
- if (!template)
- template = CLANG_BPF_CMD_DEFAULT_TEMPLATE;
-
- err = search_program_and_warn(llvm_param.clang_path,
- "clang", clang_path);
- if (err)
- return -ENOENT;
-
- /*
- * This is an optional work. Even it fail we can continue our
- * work. Needn't check error return.
- */
- llvm__get_kbuild_opts(&kbuild_dir, &kbuild_include_opts);
-
- nr_cpus_avail = llvm__get_nr_cpus();
- snprintf(nr_cpus_avail_str, sizeof(nr_cpus_avail_str), "%d",
- nr_cpus_avail);
-
- if (fetch_kernel_version(&kernel_version, NULL, 0))
- kernel_version = 0;
-
- snprintf(linux_version_code_str, sizeof(linux_version_code_str),
- "0x%x", kernel_version);
- if (asprintf(&perf_bpf_include_opts, "-I%s/", libbpf_include_dir) < 0)
- goto errout;
- force_set_env("NR_CPUS", nr_cpus_avail_str);
- force_set_env("LINUX_VERSION_CODE", linux_version_code_str);
- force_set_env("CLANG_EXEC", clang_path);
- force_set_env("CLANG_OPTIONS", clang_opt);
- force_set_env("KERNEL_INC_OPTIONS", kbuild_include_opts);
- force_set_env("PERF_BPF_INC_OPTIONS", perf_bpf_include_opts);
- force_set_env("WORKING_DIR", kbuild_dir ? : ".");
-
- if (opts) {
- err = search_program_and_warn(llvm_param.llc_path, "llc", llc_path);
- if (err)
- goto errout;
-
- err = -ENOMEM;
- if (asprintf(&pipe_template, "%s -emit-llvm | %s -march=bpf %s -filetype=obj -o -",
- template, llc_path, opts) < 0) {
- pr_err("ERROR:\tnot enough memory to setup command line\n");
- goto errout;
- }
-
- template = pipe_template;
-
- }
-
- /*
- * Since we may reset clang's working dir, path of source file
- * should be transferred into absolute path, except we want
- * stdin to be source file (testing).
- */
- force_set_env("CLANG_SOURCE",
- (path[0] == '-') ? path : abspath);
-
- pr_debug("llvm compiling command template: %s\n", template);
-
- /*
- * Below, substitute control characters for values that can cause the
- * echo to misbehave, then substitute the values back.
- */
- err = -ENOMEM;
- if (asprintf(&command_echo, "echo -n \a%s\a", template) < 0)
- goto errout;
-
-#define SWAP_CHAR(a, b) do { if (*p == a) *p = b; } while (0)
- for (char *p = command_echo; *p; p++) {
- SWAP_CHAR('<', '\001');
- SWAP_CHAR('>', '\002');
- SWAP_CHAR('"', '\003');
- SWAP_CHAR('\'', '\004');
- SWAP_CHAR('|', '\005');
- SWAP_CHAR('&', '\006');
- SWAP_CHAR('\a', '"');
- }
- err = read_from_pipe(command_echo, (void **) &command_out, NULL);
- if (err)
- goto errout;
-
- for (char *p = command_out; *p; p++) {
- SWAP_CHAR('\001', '<');
- SWAP_CHAR('\002', '>');
- SWAP_CHAR('\003', '"');
- SWAP_CHAR('\004', '\'');
- SWAP_CHAR('\005', '|');
- SWAP_CHAR('\006', '&');
- }
-#undef SWAP_CHAR
- pr_debug("llvm compiling command : %s\n", command_out);
-
- err = read_from_pipe(template, &obj_buf, &obj_buf_sz);
- if (err) {
- pr_err("ERROR:\tunable to compile %s\n", path);
- pr_err("Hint:\tCheck error message shown above.\n");
- pr_err("Hint:\tYou can also pre-compile it into .o using:\n");
- pr_err(" \t\tclang --target=bpf -O2 -c %s\n", path);
- pr_err(" \twith proper -I and -D options.\n");
- goto errout;
- }
-
- free(command_echo);
- free(command_out);
- free(kbuild_dir);
- free(kbuild_include_opts);
- free(perf_bpf_include_opts);
- free(libbpf_include_dir);
-
- if (!p_obj_buf)
- free(obj_buf);
- else
- *p_obj_buf = obj_buf;
-
- if (p_obj_buf_sz)
- *p_obj_buf_sz = obj_buf_sz;
- return 0;
-errout:
- free(command_echo);
- free(kbuild_dir);
- free(kbuild_include_opts);
- free(obj_buf);
- free(perf_bpf_include_opts);
- free(libbpf_include_dir);
- free(pipe_template);
- if (p_obj_buf)
- *p_obj_buf = NULL;
- if (p_obj_buf_sz)
- *p_obj_buf_sz = 0;
- return err;
-}
-
-int llvm__search_clang(void)
-{
- char clang_path[PATH_MAX];
-
- return search_program_and_warn(llvm_param.clang_path, "clang", clang_path);
-}
diff --git a/tools/perf/util/llvm-utils.h b/tools/perf/util/llvm-utils.h
deleted file mode 100644
index 7878a0e3fa98..000000000000
--- a/tools/perf/util/llvm-utils.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2015, Wang Nan <wangnan0@huawei.com>
- * Copyright (C) 2015, Huawei Inc.
- */
-#ifndef __LLVM_UTILS_H
-#define __LLVM_UTILS_H
-
-#include <stdbool.h>
-
-struct llvm_param {
- /* Path of clang executable */
- const char *clang_path;
- /* Path of llc executable */
- const char *llc_path;
- /*
- * Template of clang bpf compiling. 5 env variables
- * can be used:
- * $CLANG_EXEC: Path to clang.
- * $CLANG_OPTIONS: Extra options to clang.
- * $KERNEL_INC_OPTIONS: Kernel include directories.
- * $WORKING_DIR: Kernel source directory.
- * $CLANG_SOURCE: Source file to be compiled.
- */
- const char *clang_bpf_cmd_template;
- /* Will be filled in $CLANG_OPTIONS */
- const char *clang_opt;
- /*
- * If present it'll add -emit-llvm to $CLANG_OPTIONS to pipe
- * the clang output to llc, useful for new llvm options not
- * yet selectable via 'clang -mllvm option', such as -mattr=dwarfris
- * in clang 6.0/llvm 7
- */
- const char *opts;
- /* Where to find kbuild system */
- const char *kbuild_dir;
- /*
- * Arguments passed to make, like 'ARCH=arm' if doing cross
- * compiling. Should not be used for dynamic compiling.
- */
- const char *kbuild_opts;
- /*
- * Default is false. If set to true, write compiling result
- * to object file.
- */
- bool dump_obj;
- /*
- * Default is false. If one of the above fields is set by user
- * explicitly then user_set_llvm is set to true. This is used
- * for perf test. If user doesn't set anything in .perfconfig
- * and clang is not found, don't trigger llvm test.
- */
- bool user_set_param;
-};
-
-extern struct llvm_param llvm_param;
-int perf_llvm_config(const char *var, const char *value);
-
-int llvm__compile_bpf(const char *path, void **p_obj_buf, size_t *p_obj_buf_sz);
-
-/* This function is for test__llvm() use only */
-int llvm__search_clang(void);
-
-/* Following functions are reused by builtin clang support */
-void llvm__get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts);
-int llvm__get_nr_cpus(void);
-
-void llvm__dump_obj(const char *path, void *obj_buf, size_t size);
-#endif
diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c
index 51424cdc3b68..af9a97612f9d 100644
--- a/tools/perf/util/lzma.c
+++ b/tools/perf/util/lzma.c
@@ -45,15 +45,13 @@ int lzma_decompress_to_file(const char *input, int output_fd)
infile = fopen(input, "rb");
if (!infile) {
- pr_err("lzma: fopen failed on %s: '%s'\n",
- input, strerror(errno));
+ pr_debug("lzma: fopen failed on %s: '%s'\n", input, strerror(errno));
return -1;
}
ret = lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED);
if (ret != LZMA_OK) {
- pr_err("lzma: lzma_stream_decoder failed %s (%d)\n",
- lzma_strerror(ret), ret);
+ pr_debug("lzma: lzma_stream_decoder failed %s (%d)\n", lzma_strerror(ret), ret);
goto err_fclose;
}
@@ -68,7 +66,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
strm.avail_in = fread(buf_in, 1, sizeof(buf_in), infile);
if (ferror(infile)) {
- pr_err("lzma: read error: %s\n", strerror(errno));
+ pr_debug("lzma: read error: %s\n", strerror(errno));
goto err_lzma_end;
}
@@ -82,7 +80,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
ssize_t write_size = sizeof(buf_out) - strm.avail_out;
if (writen(output_fd, buf_out, write_size) != write_size) {
- pr_err("lzma: write error: %s\n", strerror(errno));
+ pr_debug("lzma: write error: %s\n", strerror(errno));
goto err_lzma_end;
}
@@ -94,7 +92,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
if (ret == LZMA_STREAM_END)
break;
- pr_err("lzma: failed %s\n", lzma_strerror(ret));
+ pr_debug("lzma: failed %s\n", lzma_strerror(ret));
goto err_lzma_end;
}
}
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index f4cb41ee23cd..88f31b3a63ac 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1215,7 +1215,9 @@ static int machine__get_running_kernel_start(struct machine *machine,
*start = addr;
- err = kallsyms__get_function_start(filename, "_etext", &addr);
+ err = kallsyms__get_symbol_start(filename, "_edata", &addr);
+ if (err)
+ err = kallsyms__get_function_start(filename, "_etext", &addr);
if (!err)
*end = addr;
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index c07fe3a90722..39ffe8ceb380 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -37,7 +37,7 @@ struct perf_mem_event * __weak perf_mem_events__ptr(int i)
return &perf_mem_events[i];
}
-char * __weak perf_mem_events__name(int i, char *pmu_name __maybe_unused)
+const char * __weak perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
@@ -53,7 +53,7 @@ char * __weak perf_mem_events__name(int i, char *pmu_name __maybe_unused)
return mem_loads_name;
}
- return (char *)e->name;
+ return e->name;
}
__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
@@ -186,7 +186,6 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
int i = *argv_nr, k = 0;
struct perf_mem_event *e;
struct perf_pmu *pmu;
- char *s;
for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
e = perf_mem_events__ptr(j);
@@ -209,15 +208,16 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
}
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+ const char *s = perf_mem_events__name(j, pmu->name);
+
rec_argv[i++] = "-e";
- s = perf_mem_events__name(j, pmu->name);
if (s) {
- s = strdup(s);
- if (!s)
+ char *copy = strdup(s);
+ if (!copy)
return -1;
- rec_argv[i++] = s;
- rec_tmp[k++] = s;
+ rec_argv[i++] = copy;
+ rec_tmp[k++] = copy;
}
}
}
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index 12372309d60e..b40ad6ea93fc 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -38,7 +38,7 @@ extern unsigned int perf_mem_events__loads_ldlat;
int perf_mem_events__parse(const char *str);
int perf_mem_events__init(void);
-char *perf_mem_events__name(int i, char *pmu_name);
+const char *perf_mem_events__name(int i, const char *pmu_name);
struct perf_mem_event *perf_mem_events__ptr(int i);
bool is_mem_loads_aux_event(struct evsel *leader);
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index a6a5ed44a679..6231044a491e 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -527,7 +527,7 @@ void metricgroup__print(const struct print_callbacks *print_cb, void *print_stat
groups.node_delete = mep_delete;
table = pmu_metrics_table__find();
if (table) {
- pmu_metrics_table_for_each_metric(table,
+ pmu_metrics_table__for_each_metric(table,
metricgroup__add_to_mep_groups_callback,
&groups);
}
@@ -1069,7 +1069,7 @@ static bool metricgroup__find_metric(const char *pmu,
.pm = pm,
};
- return pmu_metrics_table_for_each_metric(table, metricgroup__find_metric_callback, &data)
+ return pmu_metrics_table__for_each_metric(table, metricgroup__find_metric_callback, &data)
? true : false;
}
@@ -1255,7 +1255,7 @@ static int metricgroup__add_metric(const char *pmu, const char *metric_name, con
* Iterate over all metrics seeing if metric matches either the
* name or group. When it does add the metric to the list.
*/
- ret = pmu_metrics_table_for_each_metric(table, metricgroup__add_metric_callback,
+ ret = pmu_metrics_table__for_each_metric(table, metricgroup__add_metric_callback,
&data);
if (ret)
goto out;
@@ -1740,7 +1740,7 @@ bool metricgroup__has_metric(const char *pmu, const char *metric)
if (!table)
return false;
- return pmu_metrics_table_for_each_metric(table, metricgroup__has_metric_callback, &data)
+ return pmu_metrics_table__for_each_metric(table, metricgroup__has_metric_callback, &data)
? true : false;
}
@@ -1770,7 +1770,7 @@ unsigned int metricgroups__topdown_max_level(void)
if (!table)
return false;
- pmu_metrics_table_for_each_metric(table, metricgroup__topdown_max_level_callback,
+ pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
&max_level);
return max_level;
}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index c9ec0cafb69d..65608a3cba81 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -13,13 +13,12 @@
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
-#include "strlist.h"
-#include "bpf-loader.h"
+#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
-#include "parse-events-bison.h"
-#include "parse-events-flex.h"
+#include <util/parse-events-bison.h>
+#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
@@ -35,7 +34,6 @@
#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
-int parse_events_parse(void *parse_state, void *scanner);
static int get_config_terms(struct list_head *head_config,
struct list_head *head_terms __maybe_unused);
@@ -155,7 +153,7 @@ const char *event_type(int type)
return "unknown";
}
-static char *get_config_str(struct list_head *head_terms, int type_term)
+static char *get_config_str(struct list_head *head_terms, enum parse_events__term_type type_term)
{
struct parse_events_term *term;
@@ -195,38 +193,31 @@ static void fix_raw(struct list_head *config_terms, struct perf_pmu *pmu)
struct parse_events_term *term;
list_for_each_entry(term, config_terms, list) {
- struct perf_pmu_alias *alias;
- bool matched = false;
+ u64 num;
if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
continue;
- list_for_each_entry(alias, &pmu->aliases, list) {
- if (!strcmp(alias->name, term->val.str)) {
- free(term->config);
- term->config = term->val.str;
- term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
- term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
- term->val.num = 1;
- term->no_value = true;
- matched = true;
- break;
- }
- }
- if (!matched) {
- u64 num;
-
- free(term->config);
- term->config = strdup("config");
- errno = 0;
- num = strtoull(term->val.str + 1, NULL, 16);
- assert(errno == 0);
- free(term->val.str);
+ if (perf_pmu__have_event(pmu, term->val.str)) {
+ zfree(&term->config);
+ term->config = term->val.str;
term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
- term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
- term->val.num = num;
- term->no_value = false;
+ term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
+ term->val.num = 1;
+ term->no_value = true;
+ continue;
}
+
+ zfree(&term->config);
+ term->config = strdup("config");
+ errno = 0;
+ num = strtoull(term->val.str + 1, NULL, 16);
+ assert(errno == 0);
+ free(term->val.str);
+ term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
+ term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
+ term->val.num = num;
+ term->no_value = false;
}
}
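In fix_raw() above, an rXXXX term is first checked against the PMU's own event names via perf_pmu__have_event(); otherwise the characters after the leading 'r' are parsed as a hex config value. The hex-parsing half in isolation, with a made-up helper name:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse the hex digits following the leading 'r' of a raw event term. */
    static int example_parse_raw(const char *term, uint64_t *config)
    {
        char *end;

        errno = 0;
        *config = strtoull(term + 1, &end, 16);
        return (errno == 0 && end != term + 1 && *end == '\0') ? 0 : -1;
    }

    int main(void)
    {
        uint64_t config;

        if (example_parse_raw("r1a8", &config) == 0)
            printf("config=0x%llx\n", (unsigned long long)config);
        return 0;
    }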
@@ -271,7 +262,7 @@ __add_event(struct list_head *list, int *idx,
evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
evsel->auto_merge_stats = auto_merge_stats;
evsel->pmu = pmu;
- evsel->pmu_name = pmu && pmu->name ? strdup(pmu->name) : NULL;
+ evsel->pmu_name = pmu ? strdup(pmu->name) : NULL;
if (name)
evsel->name = strdup(name);
@@ -446,9 +437,6 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
if (parse_state->pmu_filter == NULL)
return false;
- if (pmu->name == NULL)
- return true;
-
return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}
@@ -499,7 +487,7 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
#ifdef HAVE_LIBTRACEEVENT
static void tracepoint_error(struct parse_events_error *e, int err,
- const char *sys, const char *name)
+ const char *sys, const char *name, int column)
{
const char *str;
char help[BUFSIZ];
@@ -526,18 +514,19 @@ static void tracepoint_error(struct parse_events_error *e, int err,
}
tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
- parse_events_error__handle(e, 0, strdup(str), strdup(help));
+ parse_events_error__handle(e, column, strdup(str), strdup(help));
}
static int add_tracepoint(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config)
+ struct list_head *head_config, void *loc_)
{
+ YYLTYPE *loc = loc_;
struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);
if (IS_ERR(evsel)) {
- tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
+ tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
return PTR_ERR(evsel);
}
@@ -556,7 +545,7 @@ static int add_tracepoint(struct list_head *list, int *idx,
static int add_tracepoint_multi_event(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config)
+ struct list_head *head_config, YYLTYPE *loc)
{
char *evt_path;
struct dirent *evt_ent;
@@ -565,13 +554,13 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
evt_path = get_events_file(sys_name);
if (!evt_path) {
- tracepoint_error(err, errno, sys_name, evt_name);
+ tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
return -1;
}
evt_dir = opendir(evt_path);
if (!evt_dir) {
put_events_file(evt_path);
- tracepoint_error(err, errno, sys_name, evt_name);
+ tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
return -1;
}
@@ -588,11 +577,11 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
found++;
ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
- err, head_config);
+ err, head_config, loc);
}
if (!found) {
- tracepoint_error(err, ENOENT, sys_name, evt_name);
+ tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
ret = -1;
}
@@ -604,19 +593,19 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
static int add_tracepoint_event(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config)
+ struct list_head *head_config, YYLTYPE *loc)
{
return strpbrk(evt_name, "*?") ?
- add_tracepoint_multi_event(list, idx, sys_name, evt_name,
- err, head_config) :
- add_tracepoint(list, idx, sys_name, evt_name,
- err, head_config);
+ add_tracepoint_multi_event(list, idx, sys_name, evt_name,
+ err, head_config, loc) :
+ add_tracepoint(list, idx, sys_name, evt_name,
+ err, head_config, loc);
}
static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config)
+ struct list_head *head_config, YYLTYPE *loc)
{
struct dirent *events_ent;
DIR *events_dir;
@@ -624,7 +613,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
events_dir = tracing_events__opendir();
if (!events_dir) {
- tracepoint_error(err, errno, sys_name, evt_name);
+ tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
return -1;
}
@@ -640,7 +629,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
continue;
ret = add_tracepoint_event(list, idx, events_ent->d_name,
- evt_name, err, head_config);
+ evt_name, err, head_config, loc);
}
closedir(events_dir);
@@ -648,264 +637,6 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
}
#endif /* HAVE_LIBTRACEEVENT */
-#ifdef HAVE_LIBBPF_SUPPORT
-struct __add_bpf_event_param {
- struct parse_events_state *parse_state;
- struct list_head *list;
- struct list_head *head_config;
-};
-
-static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj,
- void *_param)
-{
- LIST_HEAD(new_evsels);
- struct __add_bpf_event_param *param = _param;
- struct parse_events_state *parse_state = param->parse_state;
- struct list_head *list = param->list;
- struct evsel *pos;
- int err;
- /*
- * Check if we should add the event, i.e. if it is a TP but starts with a '!',
- * then don't add the tracepoint, this will be used for something else, like
- * adding to a BPF_MAP_TYPE_PROG_ARRAY.
- *
- * See tools/perf/examples/bpf/augmented_raw_syscalls.c
- */
- if (group[0] == '!')
- return 0;
-
- pr_debug("add bpf event %s:%s and attach bpf program %d\n",
- group, event, fd);
-
- err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group,
- event, parse_state->error,
- param->head_config);
- if (err) {
- struct evsel *evsel, *tmp;
-
- pr_debug("Failed to add BPF event %s:%s\n",
- group, event);
- list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) {
- list_del_init(&evsel->core.node);
- evsel__delete(evsel);
- }
- return err;
- }
- pr_debug("adding %s:%s\n", group, event);
-
- list_for_each_entry(pos, &new_evsels, core.node) {
- pr_debug("adding %s:%s to %p\n",
- group, event, pos);
- pos->bpf_fd = fd;
- pos->bpf_obj = obj;
- }
- list_splice(&new_evsels, list);
- return 0;
-}
-
-int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
- struct list_head *list,
- struct bpf_object *obj,
- struct list_head *head_config)
-{
- int err;
- char errbuf[BUFSIZ];
- struct __add_bpf_event_param param = {parse_state, list, head_config};
- static bool registered_unprobe_atexit = false;
-
- if (IS_ERR(obj) || !obj) {
- snprintf(errbuf, sizeof(errbuf),
- "Internal error: load bpf obj with NULL");
- err = -EINVAL;
- goto errout;
- }
-
- /*
- * Register atexit handler before calling bpf__probe() so
- * bpf__probe() don't need to unprobe probe points its already
- * created when failure.
- */
- if (!registered_unprobe_atexit) {
- atexit(bpf__clear);
- registered_unprobe_atexit = true;
- }
-
- err = bpf__probe(obj);
- if (err) {
- bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
- goto errout;
- }
-
- err = bpf__load(obj);
- if (err) {
- bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
- goto errout;
- }
-
- err = bpf__foreach_event(obj, add_bpf_event, &param);
- if (err) {
- snprintf(errbuf, sizeof(errbuf),
- "Attach events in BPF object failed");
- goto errout;
- }
-
- return 0;
-errout:
- parse_events_error__handle(parse_state->error, 0,
- strdup(errbuf), strdup("(add -v to see detail)"));
- return err;
-}
-
-static int
-parse_events_config_bpf(struct parse_events_state *parse_state,
- struct bpf_object *obj,
- struct list_head *head_config)
-{
- struct parse_events_term *term;
- int error_pos;
-
- if (!head_config || list_empty(head_config))
- return 0;
-
- list_for_each_entry(term, head_config, list) {
- int err;
-
- if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) {
- parse_events_error__handle(parse_state->error, term->err_term,
- strdup("Invalid config term for BPF object"),
- NULL);
- return -EINVAL;
- }
-
- err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos);
- if (err) {
- char errbuf[BUFSIZ];
- int idx;
-
- bpf__strerror_config_obj(obj, term, parse_state->evlist,
- &error_pos, err, errbuf,
- sizeof(errbuf));
-
- if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
- idx = term->err_val;
- else
- idx = term->err_term + error_pos;
-
- parse_events_error__handle(parse_state->error, idx,
- strdup(errbuf),
- strdup(
-"Hint:\tValid config terms:\n"
-" \tmap:[<arraymap>].value<indices>=[value]\n"
-" \tmap:[<eventmap>].event<indices>=[event]\n"
-"\n"
-" \twhere <indices> is something like [0,3...5] or [all]\n"
-" \t(add -v to see detail)"));
- return err;
- }
- }
- return 0;
-}
-
-/*
- * Split config terms:
- * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ...
- * 'call-graph=fp' is 'evt config', should be applied to each
- * events in bpf.c.
- * 'map:array.value[0]=1' is 'obj config', should be processed
- * with parse_events_config_bpf.
- *
- * Move object config terms from the first list to obj_head_config.
- */
-static void
-split_bpf_config_terms(struct list_head *evt_head_config,
- struct list_head *obj_head_config)
-{
- struct parse_events_term *term, *temp;
-
- /*
- * Currently, all possible user config term
- * belong to bpf object. parse_events__is_hardcoded_term()
- * happens to be a good flag.
- *
- * See parse_events_config_bpf() and
- * config_term_tracepoint().
- */
- list_for_each_entry_safe(term, temp, evt_head_config, list)
- if (!parse_events__is_hardcoded_term(term))
- list_move_tail(&term->list, obj_head_config);
-}
-
-int parse_events_load_bpf(struct parse_events_state *parse_state,
- struct list_head *list,
- char *bpf_file_name,
- bool source,
- struct list_head *head_config)
-{
- int err;
- struct bpf_object *obj;
- LIST_HEAD(obj_head_config);
-
- if (head_config)
- split_bpf_config_terms(head_config, &obj_head_config);
-
- obj = bpf__prepare_load(bpf_file_name, source);
- if (IS_ERR(obj)) {
- char errbuf[BUFSIZ];
-
- err = PTR_ERR(obj);
-
- if (err == -ENOTSUP)
- snprintf(errbuf, sizeof(errbuf),
- "BPF support is not compiled");
- else
- bpf__strerror_prepare_load(bpf_file_name,
- source,
- -err, errbuf,
- sizeof(errbuf));
-
- parse_events_error__handle(parse_state->error, 0,
- strdup(errbuf), strdup("(add -v to see detail)"));
- return err;
- }
-
- err = parse_events_load_bpf_obj(parse_state, list, obj, head_config);
- if (err)
- return err;
- err = parse_events_config_bpf(parse_state, obj, &obj_head_config);
-
- /*
- * Caller doesn't know anything about obj_head_config,
- * so combine them together again before returning.
- */
- if (head_config)
- list_splice_tail(&obj_head_config, head_config);
- return err;
-}
-#else // HAVE_LIBBPF_SUPPORT
-int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
- struct list_head *list __maybe_unused,
- struct bpf_object *obj __maybe_unused,
- struct list_head *head_config __maybe_unused)
-{
- parse_events_error__handle(parse_state->error, 0,
- strdup("BPF support is not compiled"),
- strdup("Make sure libbpf-devel is available at build time."));
- return -ENOTSUP;
-}
-
-int parse_events_load_bpf(struct parse_events_state *parse_state,
- struct list_head *list __maybe_unused,
- char *bpf_file_name __maybe_unused,
- bool source __maybe_unused,
- struct list_head *head_config __maybe_unused)
-{
- parse_events_error__handle(parse_state->error, 0,
- strdup("BPF support is not compiled"),
- strdup("Make sure libbpf-devel is available at build time."));
- return -ENOTSUP;
-}
-#endif // HAVE_LIBBPF_SUPPORT
-
static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
@@ -991,7 +722,7 @@ int parse_events_add_breakpoint(struct parse_events_state *parse_state,
static int check_type_val(struct parse_events_term *term,
struct parse_events_error *err,
- int type)
+ enum parse_events__term_val_type type)
{
if (type == term->type_val)
return 0;
@@ -1006,42 +737,49 @@ static int check_type_val(struct parse_events_term *term,
return -EINVAL;
}
-/*
- * Update according to parse-events.l
- */
-static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
- [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
- [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
- [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
- [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
- [PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
- [PARSE_EVENTS__TERM_TYPE_NAME] = "name",
- [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
- [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
- [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
- [PARSE_EVENTS__TERM_TYPE_TIME] = "time",
- [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
- [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
- [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
- [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
- [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
- [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
- [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
- [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
- [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
- [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
- [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
- [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
- [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
- [PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
- [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
- [PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
-};
-
static bool config_term_shrinked;
+static const char *config_term_name(enum parse_events__term_type term_type)
+{
+ /*
+ * Update according to parse-events.l
+ */
+ static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
+ [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
+ [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
+ [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
+ [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
+ [PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
+ [PARSE_EVENTS__TERM_TYPE_NAME] = "name",
+ [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
+ [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
+ [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
+ [PARSE_EVENTS__TERM_TYPE_TIME] = "time",
+ [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
+ [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
+ [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
+ [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
+ [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
+ [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
+ [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
+ [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
+ [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
+ [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
+ [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
+ [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
+ [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
+ [PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
+ [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
+ [PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
+ };
+ if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
+ return "unknown term";
+
+ return config_term_names[term_type];
+}
+
static bool
-config_term_avail(int term_type, struct parse_events_error *err)
+config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
char *err_str;
@@ -1063,13 +801,31 @@ config_term_avail(int term_type, struct parse_events_error *err)
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
return true;
+ case PARSE_EVENTS__TERM_TYPE_USER:
+ case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
+ case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+ case PARSE_EVENTS__TERM_TYPE_TIME:
+ case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
+ case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
+ case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
+ case PARSE_EVENTS__TERM_TYPE_INHERIT:
+ case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
+ case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
+ case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
+ case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
+ case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
+ case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
+ case PARSE_EVENTS__TERM_TYPE_RAW:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
+ case PARSE_EVENTS__TERM_TYPE_HARDWARE:
default:
if (!err)
return false;
/* term_type is validated so indexing is safe */
if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
- config_term_names[term_type]) >= 0)
+ config_term_name(term_type)) >= 0)
parse_events_error__handle(err, -1, err_str, NULL);
return false;
}
@@ -1187,10 +943,14 @@ do { \
return -EINVAL;
}
break;
+ case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
+ case PARSE_EVENTS__TERM_TYPE_USER:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
+ case PARSE_EVENTS__TERM_TYPE_HARDWARE:
default:
parse_events_error__handle(err, term->err_term,
- strdup("unknown term"),
- parse_events_formats_error_string(NULL));
+ strdup(config_term_name(term->type_term)),
+ parse_events_formats_error_string(NULL));
return -EINVAL;
}
@@ -1276,10 +1036,26 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
return config_term_common(attr, term, err);
+ case PARSE_EVENTS__TERM_TYPE_USER:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG3:
+ case PARSE_EVENTS__TERM_TYPE_NAME:
+ case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+ case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
+ case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+ case PARSE_EVENTS__TERM_TYPE_TIME:
+ case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
+ case PARSE_EVENTS__TERM_TYPE_PERCORE:
+ case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
+ case PARSE_EVENTS__TERM_TYPE_RAW:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
+ case PARSE_EVENTS__TERM_TYPE_HARDWARE:
default:
if (err) {
parse_events_error__handle(err, term->err_term,
- strdup("unknown term"),
+ strdup(config_term_name(term->type_term)),
strdup("valid terms: call-graph,stack-size\n"));
}
return -EINVAL;
@@ -1397,6 +1173,16 @@ do { \
ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
term->val.num, term->weak);
break;
+ case PARSE_EVENTS__TERM_TYPE_USER:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG3:
+ case PARSE_EVENTS__TERM_TYPE_NAME:
+ case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
+ case PARSE_EVENTS__TERM_TYPE_RAW:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
+ case PARSE_EVENTS__TERM_TYPE_HARDWARE:
default:
break;
}
@@ -1418,14 +1204,38 @@ static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
list_for_each_entry(term, head_config, list) {
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_USER:
- type = perf_pmu__format_type(&pmu->format, term->config);
+ type = perf_pmu__format_type(pmu, term->config);
if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
continue;
- bits |= perf_pmu__format_bits(&pmu->format, term->config);
+ bits |= perf_pmu__format_bits(pmu, term->config);
break;
case PARSE_EVENTS__TERM_TYPE_CONFIG:
bits = ~(u64)0;
break;
+ case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG3:
+ case PARSE_EVENTS__TERM_TYPE_NAME:
+ case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+ case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
+ case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+ case PARSE_EVENTS__TERM_TYPE_TIME:
+ case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
+ case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
+ case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
+ case PARSE_EVENTS__TERM_TYPE_INHERIT:
+ case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
+ case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
+ case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
+ case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
+ case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
+ case PARSE_EVENTS__TERM_TYPE_PERCORE:
+ case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
+ case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
+ case PARSE_EVENTS__TERM_TYPE_RAW:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
+ case PARSE_EVENTS__TERM_TYPE_HARDWARE:
default:
break;
}
@@ -1441,8 +1251,9 @@ static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
int parse_events_add_tracepoint(struct list_head *list, int *idx,
const char *sys, const char *event,
struct parse_events_error *err,
- struct list_head *head_config)
+ struct list_head *head_config, void *loc_)
{
+ YYLTYPE *loc = loc_;
#ifdef HAVE_LIBTRACEEVENT
if (head_config) {
struct perf_event_attr attr;
@@ -1454,17 +1265,17 @@ int parse_events_add_tracepoint(struct list_head *list, int *idx,
if (strpbrk(sys, "*?"))
return add_tracepoint_multi_sys(list, idx, sys, event,
- err, head_config);
+ err, head_config, loc);
else
return add_tracepoint_event(list, idx, sys, event,
- err, head_config);
+ err, head_config, loc);
#else
(void)list;
(void)idx;
(void)sys;
(void)event;
(void)head_config;
- parse_events_error__handle(err, 0, strdup("unsupported tracepoint"),
+ parse_events_error__handle(err, loc->first_column, strdup("unsupported tracepoint"),
strdup("libtraceevent is necessary for tracepoint support"));
return -1;
#endif
@@ -1557,41 +1368,44 @@ static bool config_term_percore(struct list_head *config_terms)
}
int parse_events_add_pmu(struct parse_events_state *parse_state,
- struct list_head *list, char *name,
+ struct list_head *list, const char *name,
struct list_head *head_config,
- bool auto_merge_stats)
+ bool auto_merge_stats, void *loc_)
{
struct perf_event_attr attr;
struct perf_pmu_info info;
struct perf_pmu *pmu;
struct evsel *evsel;
struct parse_events_error *err = parse_state->error;
+ YYLTYPE *loc = loc_;
LIST_HEAD(config_terms);
pmu = parse_state->fake_pmu ?: perf_pmus__find(name);
- if (verbose > 1 && !(pmu && pmu->selectable)) {
- fprintf(stderr, "Attempting to add event pmu '%s' with '",
- name);
- if (head_config) {
- struct parse_events_term *term;
-
- list_for_each_entry(term, head_config, list) {
- fprintf(stderr, "%s,", term->config);
- }
- }
- fprintf(stderr, "' that may result in non-fatal errors\n");
- }
-
if (!pmu) {
char *err_str;
if (asprintf(&err_str,
"Cannot find PMU `%s'. Missing kernel support?",
name) >= 0)
- parse_events_error__handle(err, 0, err_str, NULL);
+ parse_events_error__handle(err, loc->first_column, err_str, NULL);
return -EINVAL;
}
+
+ if (verbose > 1) {
+ struct strbuf sb;
+
+ strbuf_init(&sb, /*hint=*/ 0);
+ if (pmu->selectable && !head_config) {
+ strbuf_addf(&sb, "%s//", name);
+ } else {
+ strbuf_addf(&sb, "%s/", name);
+ parse_events_term__to_strbuf(head_config, &sb);
+ strbuf_addch(&sb, '/');
+ }
+ fprintf(stderr, "Attempt to add: %s\n", sb.buf);
+ strbuf_release(&sb);
+ }
if (head_config)
fix_raw(head_config, pmu);
@@ -1612,20 +1426,16 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
return evsel ? 0 : -ENOMEM;
}
- if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info))
+ if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info, err))
return -EINVAL;
if (verbose > 1) {
- fprintf(stderr, "After aliases, add event pmu '%s' with '",
- name);
- if (head_config) {
- struct parse_events_term *term;
+ struct strbuf sb;
- list_for_each_entry(term, head_config, list) {
- fprintf(stderr, "%s,", term->config);
- }
- }
- fprintf(stderr, "' that may result in non-fatal errors\n");
+ strbuf_init(&sb, /*hint=*/ 0);
+ parse_events_term__to_strbuf(head_config, &sb);
+ fprintf(stderr, "..after resolving event: %s/%s/\n", name, sb.buf);
+ strbuf_release(&sb);
}
/*
@@ -1675,14 +1485,15 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
char *str, struct list_head *head,
- struct list_head **listp)
+ struct list_head **listp, void *loc_)
{
struct parse_events_term *term;
struct list_head *list = NULL;
struct list_head *orig_head = NULL;
struct perf_pmu *pmu = NULL;
+ YYLTYPE *loc = loc_;
int ok = 0;
- char *config;
+ const char *config;
*listp = NULL;
@@ -1699,9 +1510,9 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
if (parse_events_term__num(&term,
PARSE_EVENTS__TERM_TYPE_USER,
- config, 1, false, NULL,
- NULL) < 0) {
- free(config);
+ config, /*num=*/1, /*novalue=*/true,
+ loc, /*loc_val=*/NULL) < 0) {
+ zfree(&config);
goto out_err;
}
list_add_tail(&term->list, head);
@@ -1714,33 +1525,38 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
INIT_LIST_HEAD(list);
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- struct perf_pmu_alias *alias;
bool auto_merge_stats;
if (parse_events__filter_pmu(parse_state, pmu))
continue;
- auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
+ if (!perf_pmu__have_event(pmu, str))
+ continue;
- list_for_each_entry(alias, &pmu->aliases, list) {
- if (!strcasecmp(alias->name, str)) {
- parse_events_copy_term_list(head, &orig_head);
- if (!parse_events_add_pmu(parse_state, list,
- pmu->name, orig_head,
- auto_merge_stats)) {
- pr_debug("%s -> %s/%s/\n", str,
- pmu->name, alias->str);
- ok++;
- }
- parse_events_terms__delete(orig_head);
- }
+ auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
+ parse_events_copy_term_list(head, &orig_head);
+ if (!parse_events_add_pmu(parse_state, list, pmu->name,
+ orig_head, auto_merge_stats, loc)) {
+ struct strbuf sb;
+
+ strbuf_init(&sb, /*hint=*/ 0);
+ parse_events_term__to_strbuf(orig_head, &sb);
+ pr_debug("%s -> %s/%s/\n", str, pmu->name, sb.buf);
+ strbuf_release(&sb);
+ ok++;
}
+ parse_events_terms__delete(orig_head);
}
if (parse_state->fake_pmu) {
if (!parse_events_add_pmu(parse_state, list, str, head,
- /*auto_merge_stats=*/true)) {
- pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
+ /*auto_merge_stats=*/true, loc)) {
+ struct strbuf sb;
+
+ strbuf_init(&sb, /*hint=*/ 0);
+ parse_events_term__to_strbuf(head, &sb);
+ pr_debug("%s -> %s/%s/\n", str, "fake_pmu", sb.buf);
+ strbuf_release(&sb);
ok++;
}
}
@@ -1972,14 +1788,18 @@ int parse_events_name(struct list_head *list, const char *name)
struct evsel *evsel;
__evlist__for_each_entry(list, evsel) {
- if (!evsel->name)
+ if (!evsel->name) {
evsel->name = strdup(name);
+ if (!evsel->name)
+ return -ENOMEM;
+ }
}
return 0;
}
static int parse_events__scanner(const char *str,
+ FILE *input,
struct parse_events_state *parse_state)
{
YY_BUFFER_STATE buffer;
@@ -1990,7 +1810,10 @@ static int parse_events__scanner(const char *str,
if (ret)
return ret;
- buffer = parse_events__scan_string(str, scanner);
+ if (str)
+ buffer = parse_events__scan_string(str, scanner);
+ else
+ parse_events_set_in(input, scanner);
#ifdef PARSER_DEBUG
parse_events_debug = 1;
@@ -1998,8 +1821,10 @@ static int parse_events__scanner(const char *str,
#endif
ret = parse_events_parse(parse_state, scanner);
- parse_events__flush_buffer(buffer, scanner);
- parse_events__delete_buffer(buffer, scanner);
+ if (str) {
+ parse_events__flush_buffer(buffer, scanner);
+ parse_events__delete_buffer(buffer, scanner);
+ }
parse_events_lex_destroy(scanner);
return ret;
}
@@ -2007,7 +1832,7 @@ static int parse_events__scanner(const char *str,
/*
* parse event config string, return a list of event terms.
*/
-int parse_events_terms(struct list_head *terms, const char *str)
+int parse_events_terms(struct list_head *terms, const char *str, FILE *input)
{
struct parse_events_state parse_state = {
.terms = NULL,
@@ -2015,7 +1840,7 @@ int parse_events_terms(struct list_head *terms, const char *str)
};
int ret;
- ret = parse_events__scanner(str, &parse_state);
+ ret = parse_events__scanner(str, input, &parse_state);
if (!ret) {
list_splice(parse_state.terms, terms);
@@ -2259,7 +2084,6 @@ int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filte
.list = LIST_HEAD_INIT(parse_state.list),
.idx = evlist->core.nr_entries,
.error = err,
- .evlist = evlist,
.stoken = PE_START_EVENTS,
.fake_pmu = fake_pmu,
.pmu_filter = pmu_filter,
@@ -2267,7 +2091,7 @@ int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filte
};
int ret, ret2;
- ret = parse_events__scanner(str, &parse_state);
+ ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);
if (!ret && list_empty(&parse_state.list)) {
WARN_ONCE(true, "WARNING: event parser found nothing\n");
@@ -2348,7 +2172,7 @@ void parse_events_error__handle(struct parse_events_error *err, int idx,
break;
default:
pr_debug("Multiple errors dropping message: %s (%s)\n",
- err->str, err->help);
+ err->str, err->help ?: "<no help>");
free(err->str);
err->str = str;
free(err->help);
@@ -2641,7 +2465,8 @@ static int new_term(struct parse_events_term **_term,
}
int parse_events_term__num(struct parse_events_term **term,
- int type_term, char *config, u64 num,
+ enum parse_events__term_type type_term,
+ const char *config, u64 num,
bool no_value,
void *loc_term_, void *loc_val_)
{
@@ -2651,17 +2476,18 @@ int parse_events_term__num(struct parse_events_term **term,
struct parse_events_term temp = {
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = type_term,
- .config = config ? : strdup(config_term_names[type_term]),
+ .config = config ? : strdup(config_term_name(type_term)),
.no_value = no_value,
.err_term = loc_term ? loc_term->first_column : 0,
.err_val = loc_val ? loc_val->first_column : 0,
};
- return new_term(term, &temp, NULL, num);
+ return new_term(term, &temp, /*str=*/NULL, num);
}
int parse_events_term__str(struct parse_events_term **term,
- int type_term, char *config, char *str,
+ enum parse_events__term_type type_term,
+ char *config, char *str,
void *loc_term_, void *loc_val_)
{
YYLTYPE *loc_term = loc_term_;
@@ -2675,15 +2501,16 @@ int parse_events_term__str(struct parse_events_term **term,
.err_val = loc_val ? loc_val->first_column : 0,
};
- return new_term(term, &temp, str, 0);
+ return new_term(term, &temp, str, /*num=*/0);
}
int parse_events_term__term(struct parse_events_term **term,
- int term_lhs, int term_rhs,
+ enum parse_events__term_type term_lhs,
+ enum parse_events__term_type term_rhs,
void *loc_term, void *loc_val)
{
return parse_events_term__str(term, term_lhs, NULL,
- strdup(config_term_names[term_rhs]),
+ strdup(config_term_name(term_rhs)),
loc_term, loc_val);
}
@@ -2691,33 +2518,25 @@ int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term)
{
char *str;
- struct parse_events_term temp = {
- .type_val = term->type_val,
- .type_term = term->type_term,
- .config = NULL,
- .err_term = term->err_term,
- .err_val = term->err_val,
- };
+ struct parse_events_term temp = *term;
+ temp.used = false;
if (term->config) {
temp.config = strdup(term->config);
if (!temp.config)
return -ENOMEM;
}
if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
- return new_term(new, &temp, NULL, term->val.num);
+ return new_term(new, &temp, /*str=*/NULL, term->val.num);
str = strdup(term->val.str);
if (!str)
return -ENOMEM;
- return new_term(new, &temp, str, 0);
+ return new_term(new, &temp, str, /*num=*/0);
}
void parse_events_term__delete(struct parse_events_term *term)
{
- if (term->array.nr_ranges)
- zfree(&term->array.ranges);
-
if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
zfree(&term->val.str);
@@ -2768,9 +2587,47 @@ void parse_events_terms__delete(struct list_head *terms)
free(terms);
}
-void parse_events__clear_array(struct parse_events_array *a)
+int parse_events_term__to_strbuf(struct list_head *term_list, struct strbuf *sb)
{
- zfree(&a->ranges);
+ struct parse_events_term *term;
+ bool first = true;
+
+ if (!term_list)
+ return 0;
+
+ list_for_each_entry(term, term_list, list) {
+ int ret;
+
+ if (!first) {
+ ret = strbuf_addch(sb, ',');
+ if (ret < 0)
+ return ret;
+ }
+ first = false;
+
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
+ if (term->no_value) {
+ assert(term->val.num == 1);
+ ret = strbuf_addf(sb, "%s", term->config);
+ } else
+ ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
+ else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
+ if (term->config) {
+ ret = strbuf_addf(sb, "%s=", term->config);
+ if (ret < 0)
+ return ret;
+ } else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
+ ret = strbuf_addf(sb, "%s=", config_term_name(term->type_term));
+ if (ret < 0)
+ return ret;
+ }
+ assert(!term->no_value);
+ ret = strbuf_addf(sb, "%s", term->val.str);
+ }
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
}
void parse_events_evlist_error(struct parse_events_state *parse_state,
@@ -2789,7 +2646,7 @@ static void config_terms_list(char *buf, size_t buf_sz)
buf[0] = '\0';
for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
- const char *name = config_term_names[i];
+ const char *name = config_term_name(i);
if (!config_term_avail(i, NULL))
continue;
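For illustration only (not part of this patch): the new parse_events_term__to_strbuf() helper collapses the hand-rolled term printing removed above into the pattern the verbose paths now use. A minimal sketch, assuming only the strbuf calls that appear in the hunks here (strbuf_init/strbuf_release) and a head_config term list as used throughout parse-events.c:

	struct strbuf sb;

	strbuf_init(&sb, /*hint=*/0);
	/* Renders e.g. "event=0x3c,umask=0x1" or "period=0x3e8" from the term list. */
	parse_events_term__to_strbuf(head_config, &sb);
	fprintf(stderr, "terms: %s\n", sb.buf);
	strbuf_release(&sb);
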
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index b0eb95f93e9c..594e5d2dc67f 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -9,6 +9,7 @@
#include <stdbool.h>
#include <linux/types.h>
#include <linux/perf_event.h>
+#include <stdio.h>
#include <string.h>
struct evsel;
@@ -17,6 +18,7 @@ struct parse_events_error;
struct option;
struct perf_pmu;
+struct strbuf;
const char *event_type(int type);
@@ -42,16 +44,16 @@ static inline int parse_events(struct evlist *evlist, const char *str,
int parse_event(struct evlist *evlist, const char *str);
-int parse_events_terms(struct list_head *terms, const char *str);
+int parse_events_terms(struct list_head *terms, const char *str, FILE *input);
int parse_filter(const struct option *opt, const char *str, int unset);
int exclude_perf(const struct option *opt, const char *arg, int unset);
-enum {
+enum parse_events__term_val_type {
PARSE_EVENTS__TERM_TYPE_NUM,
PARSE_EVENTS__TERM_TYPE_STR,
};
-enum {
+enum parse_events__term_type {
PARSE_EVENTS__TERM_TYPE_USER,
PARSE_EVENTS__TERM_TYPE_CONFIG,
PARSE_EVENTS__TERM_TYPE_CONFIG1,
@@ -78,36 +80,54 @@ enum {
PARSE_EVENTS__TERM_TYPE_RAW,
PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE,
PARSE_EVENTS__TERM_TYPE_HARDWARE,
- __PARSE_EVENTS__TERM_TYPE_NR,
-};
-
-struct parse_events_array {
- size_t nr_ranges;
- struct {
- unsigned int start;
- size_t length;
- } *ranges;
+#define __PARSE_EVENTS__TERM_TYPE_NR (PARSE_EVENTS__TERM_TYPE_HARDWARE + 1)
};
struct parse_events_term {
- char *config;
- struct parse_events_array array;
+ /** @list: The term list the term is a part of. */
+ struct list_head list;
+ /**
+ * @config: The left-hand side of a term assignment, so the term
+ * "event=8" would have the config be "event"
+ */
+ const char *config;
+ /**
+ * @val: The right-hand side of a term assignment that can either be a
+ * string or a number depending on type_val.
+ */
union {
char *str;
u64 num;
} val;
- int type_val;
- int type_term;
- struct list_head list;
- bool used;
- bool no_value;
-
- /* error string indexes for within parsed string */
+ /** @type_val: The union variable in val to be used for the term. */
+ enum parse_events__term_val_type type_val;
+ /**
+ * @type_term: A predefined term type or PARSE_EVENTS__TERM_TYPE_USER
+ * when not inbuilt.
+ */
+ enum parse_events__term_type type_term;
+ /**
+ * @err_term: The column index of the term from parsing, used during
+ * error output.
+ */
int err_term;
+ /**
+ * @err_val: The column index of the val from parsing, used during error
+ * output.
+ */
int err_val;
-
- /* Coming from implicit alias */
+ /** @used: Was the term used during parameterized-eval. */
+ bool used;
+ /**
+ * @weak: A term from the sysfs or json encoding of an event that
+ * shouldn't override terms coming from the command line.
+ */
bool weak;
+ /**
+ * @no_value: Is there no value. If a numeric term has no value then the
+ * value is assumed to be 1. An event name also has no value.
+ */
+ bool no_value;
};
struct parse_events_error {
@@ -121,17 +141,23 @@ struct parse_events_error {
};
struct parse_events_state {
+ /* The list parsed events are placed on. */
struct list_head list;
+ /* The updated index used by entries as they are added. */
int idx;
+ /* Error information. */
struct parse_events_error *error;
- struct evlist *evlist;
+ /* Holds returned terms for term parsing. */
struct list_head *terms;
+ /* Start token. */
int stoken;
+ /* Special fake PMU marker for testing. */
struct perf_pmu *fake_pmu;
/* If non-null, when wildcard matching only match the given PMU. */
const char *pmu_filter;
/* Should PE_LEGACY_NAME tokens be generated for config terms? */
bool match_legacy_cache_terms;
+ /* Were multiple PMUs scanned to find events? */
bool wild_card_pmus;
};
@@ -140,39 +166,31 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
void parse_events__shrink_config_terms(void);
int parse_events__is_hardcoded_term(struct parse_events_term *term);
int parse_events_term__num(struct parse_events_term **term,
- int type_term, char *config, u64 num,
+ enum parse_events__term_type type_term,
+ const char *config, u64 num,
bool novalue,
void *loc_term, void *loc_val);
int parse_events_term__str(struct parse_events_term **term,
- int type_term, char *config, char *str,
+ enum parse_events__term_type type_term,
+ char *config, char *str,
void *loc_term, void *loc_val);
int parse_events_term__term(struct parse_events_term **term,
- int term_lhs, int term_rhs,
+ enum parse_events__term_type term_lhs,
+ enum parse_events__term_type term_rhs,
void *loc_term, void *loc_val);
int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term);
void parse_events_term__delete(struct parse_events_term *term);
void parse_events_terms__delete(struct list_head *terms);
void parse_events_terms__purge(struct list_head *terms);
-void parse_events__clear_array(struct parse_events_array *a);
+int parse_events_term__to_strbuf(struct list_head *term_list, struct strbuf *sb);
int parse_events__modifier_event(struct list_head *list, char *str, bool add);
int parse_events__modifier_group(struct list_head *list, char *event_mod);
int parse_events_name(struct list_head *list, const char *name);
int parse_events_add_tracepoint(struct list_head *list, int *idx,
const char *sys, const char *event,
struct parse_events_error *error,
- struct list_head *head_config);
-int parse_events_load_bpf(struct parse_events_state *parse_state,
- struct list_head *list,
- char *bpf_file_name,
- bool source,
- struct list_head *head_config);
-/* Provide this function for perf test */
-struct bpf_object;
-int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
- struct list_head *list,
- struct bpf_object *obj,
- struct list_head *head_config);
+ struct list_head *head_config, void *loc);
int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
@@ -190,9 +208,9 @@ int parse_events_add_breakpoint(struct parse_events_state *parse_state,
u64 addr, char *type, u64 len,
struct list_head *head_config);
int parse_events_add_pmu(struct parse_events_state *parse_state,
- struct list_head *list, char *name,
+ struct list_head *list, const char *name,
struct list_head *head_config,
- bool auto_merge_stats);
+ bool auto_merge_stats, void *loc);
struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
const char *name, const char *metric_id,
@@ -201,7 +219,7 @@ struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
char *str,
struct list_head *head_config,
- struct list_head **listp);
+ struct list_head **listp, void *loc);
int parse_events_copy_term_list(struct list_head *old,
struct list_head **new);
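A hypothetical caller sketch for the reworked parse_events_terms() (illustrative values, not from this patch): the string and FILE arguments are mutually exclusive, mirroring the str-vs-input branch added to parse_events__scanner() above.

	LIST_HEAD(terms);

	/* Parse terms from a string... */
	if (parse_events_terms(&terms, "event=0x3c,umask=0x1", /*input=*/NULL))
		return -1;
	/* ...or from an already-open FILE such as stdin. */
	if (parse_events_terms(&terms, /*str=*/NULL, stdin))
		return -1;
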
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 99335ec586ae..4ef4b6f171a0 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -68,31 +68,6 @@ static int lc_str(yyscan_t scanner, const struct parse_events_state *state)
return str(scanner, state->match_legacy_cache_terms ? PE_LEGACY_CACHE : PE_NAME);
}
-static bool isbpf_suffix(char *text)
-{
- int len = strlen(text);
-
- if (len < 2)
- return false;
- if ((text[len - 1] == 'c' || text[len - 1] == 'o') &&
- text[len - 2] == '.')
- return true;
- if (len > 4 && !strcmp(text + len - 4, ".obj"))
- return true;
- return false;
-}
-
-static bool isbpf(yyscan_t scanner)
-{
- char *text = parse_events_get_text(scanner);
- struct stat st;
-
- if (!isbpf_suffix(text))
- return false;
-
- return stat(text, &st) == 0;
-}
-
/*
* This function is called when the parser gets two kind of input:
*
@@ -141,7 +116,7 @@ static int tool(yyscan_t scanner, enum perf_tool_event event)
return PE_VALUE_SYM_TOOL;
}
-static int term(yyscan_t scanner, int type)
+static int term(yyscan_t scanner, enum parse_events__term_type type)
{
YYSTYPE *yylval = parse_events_get_lval(scanner);
@@ -175,13 +150,10 @@ do { \
%x mem
%s config
%x event
-%x array
group [^,{}/]*[{][^}]*[}][^,{}/]*
event_pmu [^,{}/]+[/][^/]*[/][^,{}/]*
event [^,{}/]+
-bpf_object [^,{}]+\.(o|bpf)[a-zA-Z0-9._]*
-bpf_source [^,{}]+\.c[a-zA-Z0-9._]*
num_dec [0-9]+
num_hex 0x[a-fA-F0-9]+
@@ -234,8 +206,6 @@ non_digit [^0-9]
}
{event_pmu} |
-{bpf_object} |
-{bpf_source} |
{event} {
BEGIN(INITIAL);
REWIND(1);
@@ -251,14 +221,6 @@ non_digit [^0-9]
}
}
-<array>{
-"]" { BEGIN(config); return ']'; }
-{num_dec} { return value(yyscanner, 10); }
-{num_hex} { return value(yyscanner, 16); }
-, { return ','; }
-"\.\.\." { return PE_ARRAY_RANGE; }
-}
-
<config>{
/*
* Please update config_term_names when new static term is added.
@@ -302,8 +264,6 @@ r0x{num_raw_hex} { return str(yyscanner, PE_RAW); }
{lc_type}-{lc_op_result} { return lc_str(yyscanner, _parse_state); }
{lc_type}-{lc_op_result}-{lc_op_result} { return lc_str(yyscanner, _parse_state); }
{name_minus} { return str(yyscanner, PE_NAME); }
-\[all\] { return PE_ARRAY_ALL; }
-"[" { BEGIN(array); return '['; }
@{drv_cfg_term} { return drv_str(yyscanner, PE_DRV_CFG_TERM); }
}
@@ -374,8 +334,6 @@ r{num_raw_hex} { return str(yyscanner, PE_RAW); }
{num_hex} { return value(yyscanner, 16); }
{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); }
-{bpf_object} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_OBJECT); }
-{bpf_source} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_SOURCE); }
{name} { return str(yyscanner, PE_NAME); }
{name_tag} { return str(yyscanner, PE_NAME); }
"/" { BEGIN(config); return '/'; }
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 9f28d4b5502f..21bfe7e0d944 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -20,12 +20,14 @@
#include "parse-events.h"
#include "parse-events-bison.h"
+int parse_events_lex(YYSTYPE * yylval_param, YYLTYPE * yylloc_param , void *yyscanner);
void parse_events_error(YYLTYPE *loc, void *parse_state, void *scanner, char const *msg);
-#define ABORT_ON(val) \
+#define PE_ABORT(val) \
do { \
- if (val) \
- YYABORT; \
+ if (val == -ENOMEM) \
+ YYNOMEM; \
+ YYABORT; \
} while (0)
static struct list_head* alloc_list(void)
@@ -58,13 +60,10 @@ static void free_list_evsel(struct list_head* list_evsel)
%token PE_VALUE_SYM_TOOL
%token PE_EVENT_NAME
%token PE_RAW PE_NAME
-%token PE_BPF_OBJECT PE_BPF_SOURCE
%token PE_MODIFIER_EVENT PE_MODIFIER_BP PE_BP_COLON PE_BP_SLASH
%token PE_LEGACY_CACHE
-%token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
+%token PE_PREFIX_MEM
%token PE_ERROR
-%token PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
-%token PE_ARRAY_ALL PE_ARRAY_RANGE
%token PE_DRV_CFG_TERM
%token PE_TERM_HW
%type <num> PE_VALUE
@@ -75,13 +74,10 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <num> value_sym
%type <str> PE_RAW
%type <str> PE_NAME
-%type <str> PE_BPF_OBJECT
-%type <str> PE_BPF_SOURCE
%type <str> PE_LEGACY_CACHE
%type <str> PE_MODIFIER_EVENT
%type <str> PE_MODIFIER_BP
%type <str> PE_EVENT_NAME
-%type <str> PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
%type <str> PE_DRV_CFG_TERM
%type <str> name_or_raw name_or_legacy
%destructor { free ($$); } <str>
@@ -98,7 +94,6 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <list_evsel> event_legacy_tracepoint
%type <list_evsel> event_legacy_numeric
%type <list_evsel> event_legacy_raw
-%type <list_evsel> event_bpf_file
%type <list_evsel> event_def
%type <list_evsel> event_mod
%type <list_evsel> event_name
@@ -109,11 +104,6 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <list_evsel> groups
%destructor { free_list_evsel ($$); } <list_evsel>
%type <tracepoint_name> tracepoint_name
-%destructor { free ($$.sys); free ($$.event); } <tracepoint_name>
-%type <array> array
-%type <array> array_term
-%type <array> array_terms
-%destructor { free ($$.ranges); } <array>
%type <hardware_term> PE_TERM_HW
%destructor { free ($$.str); } <hardware_term>
@@ -128,7 +118,6 @@ static void free_list_evsel(struct list_head* list_evsel)
char *sys;
char *event;
} tracepoint_name;
- struct parse_events_array array;
struct hardware_term {
char *str;
u64 num;
@@ -265,7 +254,7 @@ PE_EVENT_NAME event_def
free($1);
if (err) {
free_list_evsel($2);
- YYABORT;
+ YYNOMEM;
}
$$ = $2;
}
@@ -278,47 +267,47 @@ event_def: event_pmu |
event_legacy_mem sep_dc |
event_legacy_tracepoint sep_dc |
event_legacy_numeric sep_dc |
- event_legacy_raw sep_dc |
- event_bpf_file
+ event_legacy_raw sep_dc
event_pmu:
PE_NAME opt_pmu_config
{
struct parse_events_state *parse_state = _parse_state;
- struct parse_events_error *error = parse_state->error;
struct list_head *list = NULL, *orig_terms = NULL, *terms= NULL;
char *pattern = NULL;
-#define CLEANUP_YYABORT \
+#define CLEANUP \
do { \
parse_events_terms__delete($2); \
parse_events_terms__delete(orig_terms); \
free(list); \
free($1); \
free(pattern); \
- YYABORT; \
} while(0)
- if (parse_events_copy_term_list($2, &orig_terms))
- CLEANUP_YYABORT;
-
- if (error)
- error->idx = @1.first_column;
+ if (parse_events_copy_term_list($2, &orig_terms)) {
+ CLEANUP;
+ YYNOMEM;
+ }
list = alloc_list();
- if (!list)
- CLEANUP_YYABORT;
+ if (!list) {
+ CLEANUP;
+ YYNOMEM;
+ }
/* Attempt to add to list assuming $1 is a PMU name. */
- if (parse_events_add_pmu(parse_state, list, $1, $2, /*auto_merge_stats=*/false)) {
+ if (parse_events_add_pmu(parse_state, list, $1, $2, /*auto_merge_stats=*/false, &@1)) {
struct perf_pmu *pmu = NULL;
int ok = 0;
/* Failure to add, try wildcard expansion of $1 as a PMU name. */
- if (asprintf(&pattern, "%s*", $1) < 0)
- CLEANUP_YYABORT;
+ if (asprintf(&pattern, "%s*", $1) < 0) {
+ CLEANUP;
+ YYNOMEM;
+ }
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- char *name = pmu->name;
+ const char *name = pmu->name;
if (parse_events__filter_pmu(parse_state, pmu))
continue;
@@ -330,10 +319,12 @@ PE_NAME opt_pmu_config
!perf_pmu__match(pattern, pmu->alias_name, $1)) {
bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
- if (parse_events_copy_term_list(orig_terms, &terms))
- CLEANUP_YYABORT;
+ if (parse_events_copy_term_list(orig_terms, &terms)) {
+ CLEANUP;
+ YYNOMEM;
+ }
if (!parse_events_add_pmu(parse_state, list, pmu->name, terms,
- auto_merge_stats)) {
+ auto_merge_stats, &@1)) {
ok++;
parse_state->wild_card_pmus = true;
}
@@ -344,30 +335,26 @@ PE_NAME opt_pmu_config
if (!ok) {
/* Failure to add, assume $1 is an event name. */
zfree(&list);
- ok = !parse_events_multi_pmu_add(parse_state, $1, $2, &list);
+ ok = !parse_events_multi_pmu_add(parse_state, $1, $2, &list, &@1);
$2 = NULL;
}
- if (!ok)
- CLEANUP_YYABORT;
+ if (!ok) {
+ struct parse_events_error *error = parse_state->error;
+ char *help;
+
+ if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", $1) < 0)
+ help = NULL;
+ parse_events_error__handle(error, @1.first_column,
+ strdup("Bad event or PMU"),
+ help);
+ CLEANUP;
+ YYABORT;
+ }
}
- parse_events_terms__delete($2);
- parse_events_terms__delete(orig_terms);
- free(pattern);
- free($1);
- $$ = list;
-#undef CLEANUP_YYABORT
-}
-|
-PE_KERNEL_PMU_EVENT sep_dc
-{
- struct list_head *list;
- int err;
-
- err = parse_events_multi_pmu_add(_parse_state, $1, NULL, &list);
- free($1);
- if (err < 0)
- YYABORT;
$$ = list;
+ list = NULL;
+ CLEANUP;
+#undef CLEANUP
}
|
PE_NAME sep_dc
@@ -375,61 +362,19 @@ PE_NAME sep_dc
struct list_head *list;
int err;
- err = parse_events_multi_pmu_add(_parse_state, $1, NULL, &list);
- free($1);
- if (err < 0)
- YYABORT;
- $$ = list;
-}
-|
-PE_KERNEL_PMU_EVENT opt_pmu_config
-{
- struct list_head *list;
- int err;
-
- /* frees $2 */
- err = parse_events_multi_pmu_add(_parse_state, $1, $2, &list);
- free($1);
- if (err < 0)
- YYABORT;
- $$ = list;
-}
-|
-PE_PMU_EVENT_FAKE sep_dc
-{
- struct list_head *list;
- int err;
-
- list = alloc_list();
- if (!list)
- YYABORT;
-
- err = parse_events_add_pmu(_parse_state, list, $1, /*head_config=*/NULL,
- /*auto_merge_stats=*/false);
- free($1);
+ err = parse_events_multi_pmu_add(_parse_state, $1, NULL, &list, &@1);
if (err < 0) {
- free(list);
- YYABORT;
- }
- $$ = list;
-}
-|
-PE_PMU_EVENT_FAKE opt_pmu_config
-{
- struct list_head *list;
- int err;
-
- list = alloc_list();
- if (!list)
- YYABORT;
+ struct parse_events_state *parse_state = _parse_state;
+ struct parse_events_error *error = parse_state->error;
+ char *help;
- err = parse_events_add_pmu(_parse_state, list, $1, $2, /*auto_merge_stats=*/false);
- free($1);
- parse_events_terms__delete($2);
- if (err < 0) {
- free(list);
- YYABORT;
+ if (asprintf(&help, "Unable to find event on a PMU of '%s'", $1) < 0)
+ help = NULL;
+ parse_events_error__handle(error, @1.first_column, strdup("Bad event name"), help);
+ free($1);
+ PE_ABORT(err);
}
+ free($1);
$$ = list;
}
@@ -448,12 +393,13 @@ value_sym '/' event_config '/'
bool wildcard = (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE);
list = alloc_list();
- ABORT_ON(!list);
+ if (!list)
+ YYNOMEM;
err = parse_events_add_numeric(_parse_state, list, type, config, $3, wildcard);
parse_events_terms__delete($3);
if (err) {
free_list_evsel(list);
- YYABORT;
+ PE_ABORT(err);
}
$$ = list;
}
@@ -464,21 +410,28 @@ value_sym sep_slash_slash_dc
int type = $1 >> 16;
int config = $1 & 255;
bool wildcard = (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE);
+ int err;
list = alloc_list();
- ABORT_ON(!list);
- ABORT_ON(parse_events_add_numeric(_parse_state, list, type, config,
- /*head_config=*/NULL, wildcard));
+ if (!list)
+ YYNOMEM;
+ err = parse_events_add_numeric(_parse_state, list, type, config, /*head_config=*/NULL, wildcard);
+ if (err)
+ PE_ABORT(err);
$$ = list;
}
|
PE_VALUE_SYM_TOOL sep_slash_slash_dc
{
struct list_head *list;
+ int err;
list = alloc_list();
- ABORT_ON(!list);
- ABORT_ON(parse_events_add_tool(_parse_state, list, $1));
+ if (!list)
+ YYNOMEM;
+ err = parse_events_add_tool(_parse_state, list, $1);
+ if (err)
+ YYNOMEM;
$$ = list;
}
@@ -490,14 +443,16 @@ PE_LEGACY_CACHE opt_event_config
int err;
list = alloc_list();
- ABORT_ON(!list);
+ if (!list)
+ YYNOMEM;
+
err = parse_events_add_cache(list, &parse_state->idx, $1, parse_state, $2);
parse_events_terms__delete($2);
free($1);
if (err) {
free_list_evsel(list);
- YYABORT;
+ PE_ABORT(err);
}
$$ = list;
}
@@ -509,14 +464,16 @@ PE_PREFIX_MEM PE_VALUE PE_BP_SLASH PE_VALUE PE_BP_COLON PE_MODIFIER_BP opt_event
int err;
list = alloc_list();
- ABORT_ON(!list);
+ if (!list)
+ YYNOMEM;
+
err = parse_events_add_breakpoint(_parse_state, list,
$2, $6, $4, $7);
parse_events_terms__delete($7);
free($6);
if (err) {
free(list);
- YYABORT;
+ PE_ABORT(err);
}
$$ = list;
}
@@ -527,13 +484,15 @@ PE_PREFIX_MEM PE_VALUE PE_BP_SLASH PE_VALUE opt_event_config
int err;
list = alloc_list();
- ABORT_ON(!list);
+ if (!list)
+ YYNOMEM;
+
err = parse_events_add_breakpoint(_parse_state, list,
$2, NULL, $4, $5);
parse_events_terms__delete($5);
if (err) {
free(list);
- YYABORT;
+ PE_ABORT(err);
}
$$ = list;
}
@@ -544,14 +503,16 @@ PE_PREFIX_MEM PE_VALUE PE_BP_COLON PE_MODIFIER_BP opt_event_config
int err;
list = alloc_list();
- ABORT_ON(!list);
+ if (!list)
+ YYNOMEM;
+
err = parse_events_add_breakpoint(_parse_state, list,
$2, $4, 0, $5);
parse_events_terms__delete($5);
free($4);
if (err) {
free(list);
- YYABORT;
+ PE_ABORT(err);
}
$$ = list;
}
@@ -562,13 +523,14 @@ PE_PREFIX_MEM PE_VALUE opt_event_config
int err;
list = alloc_list();
- ABORT_ON(!list);
+ if (!list)
+ YYNOMEM;
err = parse_events_add_breakpoint(_parse_state, list,
$2, NULL, 0, $3);
parse_events_terms__delete($3);
if (err) {
free(list);
- YYABORT;
+ PE_ABORT(err);
}
$$ = list;
}
@@ -582,19 +544,20 @@ tracepoint_name opt_event_config
int err;
list = alloc_list();
- ABORT_ON(!list);
+ if (!list)
+ YYNOMEM;
if (error)
error->idx = @1.first_column;
err = parse_events_add_tracepoint(list, &parse_state->idx, $1.sys, $1.event,
- error, $2);
+ error, $2, &@1);
parse_events_terms__delete($2);
free($1.sys);
free($1.event);
if (err) {
free(list);
- YYABORT;
+ PE_ABORT(err);
}
$$ = list;
}
@@ -614,13 +577,14 @@ PE_VALUE ':' PE_VALUE opt_event_config
int err;
list = alloc_list();
- ABORT_ON(!list);
+ if (!list)
+ YYNOMEM;
err = parse_events_add_numeric(_parse_state, list, (u32)$1, $3, $4,
/*wildcard=*/false);
parse_events_terms__delete($4);
if (err) {
free(list);
- YYABORT;
+ PE_ABORT(err);
}
$$ = list;
}
@@ -633,52 +597,20 @@ PE_RAW opt_event_config
u64 num;
list = alloc_list();
- ABORT_ON(!list);
+ if (!list)
+ YYNOMEM;
errno = 0;
num = strtoull($1 + 1, NULL, 16);
- ABORT_ON(errno);
+ /* Given the lexer will only give [a-fA-F0-9]+ a failure here should be impossible. */
+ if (errno)
+ YYABORT;
free($1);
err = parse_events_add_numeric(_parse_state, list, PERF_TYPE_RAW, num, $2,
/*wildcard=*/false);
parse_events_terms__delete($2);
if (err) {
free(list);
- YYABORT;
- }
- $$ = list;
-}
-
-event_bpf_file:
-PE_BPF_OBJECT opt_event_config
-{
- struct parse_events_state *parse_state = _parse_state;
- struct list_head *list;
- int err;
-
- list = alloc_list();
- ABORT_ON(!list);
- err = parse_events_load_bpf(parse_state, list, $1, false, $2);
- parse_events_terms__delete($2);
- free($1);
- if (err) {
- free(list);
- YYABORT;
- }
- $$ = list;
-}
-|
-PE_BPF_SOURCE opt_event_config
-{
- struct list_head *list;
- int err;
-
- list = alloc_list();
- ABORT_ON(!list);
- err = parse_events_load_bpf(_parse_state, list, $1, true, $2);
- parse_events_terms__delete($2);
- if (err) {
- free(list);
- YYABORT;
+ PE_ABORT(err);
}
$$ = list;
}
@@ -738,7 +670,8 @@ event_term
struct list_head *head = malloc(sizeof(*head));
struct parse_events_term *term = $1;
- ABORT_ON(!head);
+ if (!head)
+ YYNOMEM;
INIT_LIST_HEAD(head);
list_add_tail(&term->list, head);
$$ = head;
@@ -752,11 +685,12 @@ event_term:
PE_RAW
{
struct parse_events_term *term;
+ int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_RAW,
+ strdup("raw"), $1, &@1, &@1);
- if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_RAW,
- strdup("raw"), $1, &@1, &@1)) {
+ if (err) {
free($1);
- YYABORT;
+ PE_ABORT(err);
}
$$ = term;
}
@@ -764,12 +698,12 @@ PE_RAW
name_or_raw '=' name_or_legacy
{
struct parse_events_term *term;
+ int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3, &@1, &@3);
- if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, $3, &@1, &@3)) {
+ if (err) {
free($1);
free($3);
- YYABORT;
+ PE_ABORT(err);
}
$$ = term;
}
@@ -777,11 +711,12 @@ name_or_raw '=' name_or_legacy
name_or_raw '=' PE_VALUE
{
struct parse_events_term *term;
+ int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, $3, /*novalue=*/false, &@1, &@3);
- if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, $3, false, &@1, &@3)) {
+ if (err) {
free($1);
- YYABORT;
+ PE_ABORT(err);
}
$$ = term;
}
@@ -789,12 +724,13 @@ name_or_raw '=' PE_VALUE
name_or_raw '=' PE_TERM_HW
{
struct parse_events_term *term;
+ int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, $3.str, &@1, &@3);
- if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, $3.str, &@1, &@3)) {
+ if (err) {
free($1);
free($3.str);
- YYABORT;
+ PE_ABORT(err);
}
$$ = term;
}
@@ -802,11 +738,12 @@ name_or_raw '=' PE_TERM_HW
PE_LEGACY_CACHE
{
struct parse_events_term *term;
+ int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE,
+ $1, /*num=*/1, /*novalue=*/true, &@1, /*loc_val=*/NULL);
- if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE,
- $1, 1, true, &@1, NULL)) {
+ if (err) {
free($1);
- YYABORT;
+ PE_ABORT(err);
}
$$ = term;
}
@@ -814,11 +751,12 @@ PE_LEGACY_CACHE
PE_NAME
{
struct parse_events_term *term;
+ int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, /*num=*/1, /*novalue=*/true, &@1, /*loc_val=*/NULL);
- if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, 1, true, &@1, NULL)) {
+ if (err) {
free($1);
- YYABORT;
+ PE_ABORT(err);
}
$$ = term;
}
@@ -826,11 +764,13 @@ PE_NAME
PE_TERM_HW
{
struct parse_events_term *term;
+ int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_HARDWARE,
+ $1.str, $1.num & 255, /*novalue=*/false,
+ &@1, /*loc_val=*/NULL);
- if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_HARDWARE,
- $1.str, $1.num & 255, false, &@1, NULL)) {
+ if (err) {
free($1.str);
- YYABORT;
+ PE_ABORT(err);
}
$$ = term;
}
@@ -838,10 +778,12 @@ PE_TERM_HW
PE_TERM '=' name_or_legacy
{
struct parse_events_term *term;
+ int err = parse_events_term__str(&term, (enum parse_events__term_type)$1,
+ /*config=*/NULL, $3, &@1, &@3);
- if (parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3)) {
+ if (err) {
free($3);
- YYABORT;
+ PE_ABORT(err);
}
$$ = term;
}
@@ -849,10 +791,12 @@ PE_TERM '=' name_or_legacy
PE_TERM '=' PE_TERM_HW
{
struct parse_events_term *term;
+ int err = parse_events_term__str(&term, (enum parse_events__term_type)$1,
+ /*config=*/NULL, $3.str, &@1, &@3);
- if (parse_events_term__str(&term, (int)$1, NULL, $3.str, &@1, &@3)) {
+ if (err) {
free($3.str);
- YYABORT;
+ PE_ABORT(err);
}
$$ = term;
}
@@ -860,53 +804,39 @@ PE_TERM '=' PE_TERM_HW
PE_TERM '=' PE_TERM
{
struct parse_events_term *term;
+ int err = parse_events_term__term(&term,
+ (enum parse_events__term_type)$1,
+ (enum parse_events__term_type)$3,
+ &@1, &@3);
+
+ if (err)
+ PE_ABORT(err);
- ABORT_ON(parse_events_term__term(&term, (int)$1, (int)$3, &@1, &@3));
$$ = term;
}
|
PE_TERM '=' PE_VALUE
{
struct parse_events_term *term;
+ int err = parse_events_term__num(&term, (enum parse_events__term_type)$1,
+ /*config=*/NULL, $3, /*novalue=*/false, &@1, &@3);
+
+ if (err)
+ PE_ABORT(err);
- ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3, false, &@1, &@3));
$$ = term;
}
|
PE_TERM
{
struct parse_events_term *term;
+ int err = parse_events_term__num(&term, (enum parse_events__term_type)$1,
+ /*config=*/NULL, /*num=*/1, /*novalue=*/true,
+ &@1, /*loc_val=*/NULL);
- ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1, true, &@1, NULL));
- $$ = term;
-}
-|
-name_or_raw array '=' name_or_legacy
-{
- struct parse_events_term *term;
+ if (err)
+ PE_ABORT(err);
- if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, $4, &@1, &@4)) {
- free($1);
- free($4);
- free($2.ranges);
- YYABORT;
- }
- term->array = $2;
- $$ = term;
-}
-|
-name_or_raw array '=' PE_VALUE
-{
- struct parse_events_term *term;
-
- if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, $4, false, &@1, &@4)) {
- free($1);
- free($2.ranges);
- YYABORT;
- }
- term->array = $2;
$$ = term;
}
|
@@ -914,73 +844,19 @@ PE_DRV_CFG_TERM
{
struct parse_events_term *term;
char *config = strdup($1);
+ int err;
- ABORT_ON(!config);
- if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_DRV_CFG,
- config, $1, &@1, NULL)) {
+ if (!config)
+ YYNOMEM;
+ err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_DRV_CFG, config, $1, &@1, NULL);
+ if (err) {
free($1);
free(config);
- YYABORT;
+ PE_ABORT(err);
}
$$ = term;
}
-array:
-'[' array_terms ']'
-{
- $$ = $2;
-}
-|
-PE_ARRAY_ALL
-{
- $$.nr_ranges = 0;
- $$.ranges = NULL;
-}
-
-array_terms:
-array_terms ',' array_term
-{
- struct parse_events_array new_array;
-
- new_array.nr_ranges = $1.nr_ranges + $3.nr_ranges;
- new_array.ranges = realloc($1.ranges,
- sizeof(new_array.ranges[0]) *
- new_array.nr_ranges);
- ABORT_ON(!new_array.ranges);
- memcpy(&new_array.ranges[$1.nr_ranges], $3.ranges,
- $3.nr_ranges * sizeof(new_array.ranges[0]));
- free($3.ranges);
- $$ = new_array;
-}
-|
-array_term
-
-array_term:
-PE_VALUE
-{
- struct parse_events_array array;
-
- array.nr_ranges = 1;
- array.ranges = malloc(sizeof(array.ranges[0]));
- ABORT_ON(!array.ranges);
- array.ranges[0].start = $1;
- array.ranges[0].length = 1;
- $$ = array;
-}
-|
-PE_VALUE PE_ARRAY_RANGE PE_VALUE
-{
- struct parse_events_array array;
-
- ABORT_ON($3 < $1);
- array.nr_ranges = 1;
- array.ranges = malloc(sizeof(array.ranges[0]));
- ABORT_ON(!array.ranges);
- array.ranges[0].start = $1;
- array.ranges[0].length = $3 - $1 + 1;
- $$ = array;
-}
-
sep_dc: ':' |
sep_slash_slash_dc: '/' '/' | ':' |
diff --git a/tools/perf/util/perf-regs-arch/Build b/tools/perf/util/perf-regs-arch/Build
new file mode 100644
index 000000000000..d9d596d330a7
--- /dev/null
+++ b/tools/perf/util/perf-regs-arch/Build
@@ -0,0 +1,9 @@
+perf-y += perf_regs_aarch64.o
+perf-y += perf_regs_arm.o
+perf-y += perf_regs_csky.o
+perf-y += perf_regs_loongarch.o
+perf-y += perf_regs_mips.o
+perf-y += perf_regs_powerpc.o
+perf-y += perf_regs_riscv.o
+perf-y += perf_regs_s390.o
+perf-y += perf_regs_x86.o
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c b/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c
new file mode 100644
index 000000000000..696566c54768
--- /dev/null
+++ b/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifdef HAVE_PERF_REGS_SUPPORT
+
+#include "../perf_regs.h"
+#include "../../../arch/arm64/include/uapi/asm/perf_regs.h"
+
+const char *__perf_reg_name_arm64(int id)
+{
+ switch (id) {
+ case PERF_REG_ARM64_X0:
+ return "x0";
+ case PERF_REG_ARM64_X1:
+ return "x1";
+ case PERF_REG_ARM64_X2:
+ return "x2";
+ case PERF_REG_ARM64_X3:
+ return "x3";
+ case PERF_REG_ARM64_X4:
+ return "x4";
+ case PERF_REG_ARM64_X5:
+ return "x5";
+ case PERF_REG_ARM64_X6:
+ return "x6";
+ case PERF_REG_ARM64_X7:
+ return "x7";
+ case PERF_REG_ARM64_X8:
+ return "x8";
+ case PERF_REG_ARM64_X9:
+ return "x9";
+ case PERF_REG_ARM64_X10:
+ return "x10";
+ case PERF_REG_ARM64_X11:
+ return "x11";
+ case PERF_REG_ARM64_X12:
+ return "x12";
+ case PERF_REG_ARM64_X13:
+ return "x13";
+ case PERF_REG_ARM64_X14:
+ return "x14";
+ case PERF_REG_ARM64_X15:
+ return "x15";
+ case PERF_REG_ARM64_X16:
+ return "x16";
+ case PERF_REG_ARM64_X17:
+ return "x17";
+ case PERF_REG_ARM64_X18:
+ return "x18";
+ case PERF_REG_ARM64_X19:
+ return "x19";
+ case PERF_REG_ARM64_X20:
+ return "x20";
+ case PERF_REG_ARM64_X21:
+ return "x21";
+ case PERF_REG_ARM64_X22:
+ return "x22";
+ case PERF_REG_ARM64_X23:
+ return "x23";
+ case PERF_REG_ARM64_X24:
+ return "x24";
+ case PERF_REG_ARM64_X25:
+ return "x25";
+ case PERF_REG_ARM64_X26:
+ return "x26";
+ case PERF_REG_ARM64_X27:
+ return "x27";
+ case PERF_REG_ARM64_X28:
+ return "x28";
+ case PERF_REG_ARM64_X29:
+ return "x29";
+ case PERF_REG_ARM64_SP:
+ return "sp";
+ case PERF_REG_ARM64_LR:
+ return "lr";
+ case PERF_REG_ARM64_PC:
+ return "pc";
+ case PERF_REG_ARM64_VG:
+ return "vg";
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+uint64_t __perf_reg_ip_arm64(void)
+{
+ return PERF_REG_ARM64_PC;
+}
+
+uint64_t __perf_reg_sp_arm64(void)
+{
+ return PERF_REG_ARM64_SP;
+}
+
+#endif
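As a rough usage sketch (the generic dispatcher lives in perf_regs.c/.h and is not shown in this diff), each per-arch file exposes a name lookup plus the IP/SP register ids, e.g. for arm64:

	/* Resolve a sampled register id to a printable name. */
	const char *name = __perf_reg_name_arm64(PERF_REG_ARM64_SP);	/* "sp" */
	uint64_t ip_reg = __perf_reg_ip_arm64();			/* PERF_REG_ARM64_PC */
	uint64_t sp_reg = __perf_reg_sp_arm64();			/* PERF_REG_ARM64_SP */
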
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_arm.c b/tools/perf/util/perf-regs-arch/perf_regs_arm.c
new file mode 100644
index 000000000000..700fd07cd2aa
--- /dev/null
+++ b/tools/perf/util/perf-regs-arch/perf_regs_arm.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifdef HAVE_PERF_REGS_SUPPORT
+
+#include "../perf_regs.h"
+#include "../../../arch/arm/include/uapi/asm/perf_regs.h"
+
+const char *__perf_reg_name_arm(int id)
+{
+ switch (id) {
+ case PERF_REG_ARM_R0:
+ return "r0";
+ case PERF_REG_ARM_R1:
+ return "r1";
+ case PERF_REG_ARM_R2:
+ return "r2";
+ case PERF_REG_ARM_R3:
+ return "r3";
+ case PERF_REG_ARM_R4:
+ return "r4";
+ case PERF_REG_ARM_R5:
+ return "r5";
+ case PERF_REG_ARM_R6:
+ return "r6";
+ case PERF_REG_ARM_R7:
+ return "r7";
+ case PERF_REG_ARM_R8:
+ return "r8";
+ case PERF_REG_ARM_R9:
+ return "r9";
+ case PERF_REG_ARM_R10:
+ return "r10";
+ case PERF_REG_ARM_FP:
+ return "fp";
+ case PERF_REG_ARM_IP:
+ return "ip";
+ case PERF_REG_ARM_SP:
+ return "sp";
+ case PERF_REG_ARM_LR:
+ return "lr";
+ case PERF_REG_ARM_PC:
+ return "pc";
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+uint64_t __perf_reg_ip_arm(void)
+{
+ return PERF_REG_ARM_PC;
+}
+
+uint64_t __perf_reg_sp_arm(void)
+{
+ return PERF_REG_ARM_SP;
+}
+
+#endif
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_csky.c b/tools/perf/util/perf-regs-arch/perf_regs_csky.c
new file mode 100644
index 000000000000..a2841094e096
--- /dev/null
+++ b/tools/perf/util/perf-regs-arch/perf_regs_csky.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifdef HAVE_PERF_REGS_SUPPORT
+
+#include "../perf_regs.h"
+#include "../../arch/csky/include/uapi/asm/perf_regs.h"
+
+const char *__perf_reg_name_csky(int id)
+{
+ switch (id) {
+ case PERF_REG_CSKY_A0:
+ return "a0";
+ case PERF_REG_CSKY_A1:
+ return "a1";
+ case PERF_REG_CSKY_A2:
+ return "a2";
+ case PERF_REG_CSKY_A3:
+ return "a3";
+ case PERF_REG_CSKY_REGS0:
+ return "regs0";
+ case PERF_REG_CSKY_REGS1:
+ return "regs1";
+ case PERF_REG_CSKY_REGS2:
+ return "regs2";
+ case PERF_REG_CSKY_REGS3:
+ return "regs3";
+ case PERF_REG_CSKY_REGS4:
+ return "regs4";
+ case PERF_REG_CSKY_REGS5:
+ return "regs5";
+ case PERF_REG_CSKY_REGS6:
+ return "regs6";
+ case PERF_REG_CSKY_REGS7:
+ return "regs7";
+ case PERF_REG_CSKY_REGS8:
+ return "regs8";
+ case PERF_REG_CSKY_REGS9:
+ return "regs9";
+ case PERF_REG_CSKY_SP:
+ return "sp";
+ case PERF_REG_CSKY_LR:
+ return "lr";
+ case PERF_REG_CSKY_PC:
+ return "pc";
+#if defined(__CSKYABIV2__)
+ case PERF_REG_CSKY_EXREGS0:
+ return "exregs0";
+ case PERF_REG_CSKY_EXREGS1:
+ return "exregs1";
+ case PERF_REG_CSKY_EXREGS2:
+ return "exregs2";
+ case PERF_REG_CSKY_EXREGS3:
+ return "exregs3";
+ case PERF_REG_CSKY_EXREGS4:
+ return "exregs4";
+ case PERF_REG_CSKY_EXREGS5:
+ return "exregs5";
+ case PERF_REG_CSKY_EXREGS6:
+ return "exregs6";
+ case PERF_REG_CSKY_EXREGS7:
+ return "exregs7";
+ case PERF_REG_CSKY_EXREGS8:
+ return "exregs8";
+ case PERF_REG_CSKY_EXREGS9:
+ return "exregs9";
+ case PERF_REG_CSKY_EXREGS10:
+ return "exregs10";
+ case PERF_REG_CSKY_EXREGS11:
+ return "exregs11";
+ case PERF_REG_CSKY_EXREGS12:
+ return "exregs12";
+ case PERF_REG_CSKY_EXREGS13:
+ return "exregs13";
+ case PERF_REG_CSKY_EXREGS14:
+ return "exregs14";
+ case PERF_REG_CSKY_TLS:
+ return "tls";
+ case PERF_REG_CSKY_HI:
+ return "hi";
+ case PERF_REG_CSKY_LO:
+ return "lo";
+#endif
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+uint64_t __perf_reg_ip_csky(void)
+{
+ return PERF_REG_CSKY_PC;
+}
+
+uint64_t __perf_reg_sp_csky(void)
+{
+ return PERF_REG_CSKY_SP;
+}
+
+#endif
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c b/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c
new file mode 100644
index 000000000000..a9ba0f934123
--- /dev/null
+++ b/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifdef HAVE_PERF_REGS_SUPPORT
+
+#include "../perf_regs.h"
+#include "../../../arch/loongarch/include/uapi/asm/perf_regs.h"
+
+const char *__perf_reg_name_loongarch(int id)
+{
+ switch (id) {
+ case PERF_REG_LOONGARCH_PC:
+ return "PC";
+ case PERF_REG_LOONGARCH_R1:
+ return "%r1";
+ case PERF_REG_LOONGARCH_R2:
+ return "%r2";
+ case PERF_REG_LOONGARCH_R3:
+ return "%r3";
+ case PERF_REG_LOONGARCH_R4:
+ return "%r4";
+ case PERF_REG_LOONGARCH_R5:
+ return "%r5";
+ case PERF_REG_LOONGARCH_R6:
+ return "%r6";
+ case PERF_REG_LOONGARCH_R7:
+ return "%r7";
+ case PERF_REG_LOONGARCH_R8:
+ return "%r8";
+ case PERF_REG_LOONGARCH_R9:
+ return "%r9";
+ case PERF_REG_LOONGARCH_R10:
+ return "%r10";
+ case PERF_REG_LOONGARCH_R11:
+ return "%r11";
+ case PERF_REG_LOONGARCH_R12:
+ return "%r12";
+ case PERF_REG_LOONGARCH_R13:
+ return "%r13";
+ case PERF_REG_LOONGARCH_R14:
+ return "%r14";
+ case PERF_REG_LOONGARCH_R15:
+ return "%r15";
+ case PERF_REG_LOONGARCH_R16:
+ return "%r16";
+ case PERF_REG_LOONGARCH_R17:
+ return "%r17";
+ case PERF_REG_LOONGARCH_R18:
+ return "%r18";
+ case PERF_REG_LOONGARCH_R19:
+ return "%r19";
+ case PERF_REG_LOONGARCH_R20:
+ return "%r20";
+ case PERF_REG_LOONGARCH_R21:
+ return "%r21";
+ case PERF_REG_LOONGARCH_R22:
+ return "%r22";
+ case PERF_REG_LOONGARCH_R23:
+ return "%r23";
+ case PERF_REG_LOONGARCH_R24:
+ return "%r24";
+ case PERF_REG_LOONGARCH_R25:
+ return "%r25";
+ case PERF_REG_LOONGARCH_R26:
+ return "%r26";
+ case PERF_REG_LOONGARCH_R27:
+ return "%r27";
+ case PERF_REG_LOONGARCH_R28:
+ return "%r28";
+ case PERF_REG_LOONGARCH_R29:
+ return "%r29";
+ case PERF_REG_LOONGARCH_R30:
+ return "%r30";
+ case PERF_REG_LOONGARCH_R31:
+ return "%r31";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+uint64_t __perf_reg_ip_loongarch(void)
+{
+ return PERF_REG_LOONGARCH_PC;
+}
+
+uint64_t __perf_reg_sp_loongarch(void)
+{
+ return PERF_REG_LOONGARCH_R3;
+}
+
+#endif
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_mips.c b/tools/perf/util/perf-regs-arch/perf_regs_mips.c
new file mode 100644
index 000000000000..5a45830cfbf5
--- /dev/null
+++ b/tools/perf/util/perf-regs-arch/perf_regs_mips.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifdef HAVE_PERF_REGS_SUPPORT
+
+#include "../perf_regs.h"
+#include "../../../arch/mips/include/uapi/asm/perf_regs.h"
+
+const char *__perf_reg_name_mips(int id)
+{
+ switch (id) {
+ case PERF_REG_MIPS_PC:
+ return "PC";
+ case PERF_REG_MIPS_R1:
+ return "$1";
+ case PERF_REG_MIPS_R2:
+ return "$2";
+ case PERF_REG_MIPS_R3:
+ return "$3";
+ case PERF_REG_MIPS_R4:
+ return "$4";
+ case PERF_REG_MIPS_R5:
+ return "$5";
+ case PERF_REG_MIPS_R6:
+ return "$6";
+ case PERF_REG_MIPS_R7:
+ return "$7";
+ case PERF_REG_MIPS_R8:
+ return "$8";
+ case PERF_REG_MIPS_R9:
+ return "$9";
+ case PERF_REG_MIPS_R10:
+ return "$10";
+ case PERF_REG_MIPS_R11:
+ return "$11";
+ case PERF_REG_MIPS_R12:
+ return "$12";
+ case PERF_REG_MIPS_R13:
+ return "$13";
+ case PERF_REG_MIPS_R14:
+ return "$14";
+ case PERF_REG_MIPS_R15:
+ return "$15";
+ case PERF_REG_MIPS_R16:
+ return "$16";
+ case PERF_REG_MIPS_R17:
+ return "$17";
+ case PERF_REG_MIPS_R18:
+ return "$18";
+ case PERF_REG_MIPS_R19:
+ return "$19";
+ case PERF_REG_MIPS_R20:
+ return "$20";
+ case PERF_REG_MIPS_R21:
+ return "$21";
+ case PERF_REG_MIPS_R22:
+ return "$22";
+ case PERF_REG_MIPS_R23:
+ return "$23";
+ case PERF_REG_MIPS_R24:
+ return "$24";
+ case PERF_REG_MIPS_R25:
+ return "$25";
+ case PERF_REG_MIPS_R28:
+ return "$28";
+ case PERF_REG_MIPS_R29:
+ return "$29";
+ case PERF_REG_MIPS_R30:
+ return "$30";
+ case PERF_REG_MIPS_R31:
+ return "$31";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+uint64_t __perf_reg_ip_mips(void)
+{
+ return PERF_REG_MIPS_PC;
+}
+
+uint64_t __perf_reg_sp_mips(void)
+{
+ return PERF_REG_MIPS_R29;
+}
+
+#endif
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c b/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c
new file mode 100644
index 000000000000..1f0d682db74a
--- /dev/null
+++ b/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifdef HAVE_PERF_REGS_SUPPORT
+
+#include "../perf_regs.h"
+#include "../../../arch/powerpc/include/uapi/asm/perf_regs.h"
+
+const char *__perf_reg_name_powerpc(int id)
+{
+ switch (id) {
+ case PERF_REG_POWERPC_R0:
+ return "r0";
+ case PERF_REG_POWERPC_R1:
+ return "r1";
+ case PERF_REG_POWERPC_R2:
+ return "r2";
+ case PERF_REG_POWERPC_R3:
+ return "r3";
+ case PERF_REG_POWERPC_R4:
+ return "r4";
+ case PERF_REG_POWERPC_R5:
+ return "r5";
+ case PERF_REG_POWERPC_R6:
+ return "r6";
+ case PERF_REG_POWERPC_R7:
+ return "r7";
+ case PERF_REG_POWERPC_R8:
+ return "r8";
+ case PERF_REG_POWERPC_R9:
+ return "r9";
+ case PERF_REG_POWERPC_R10:
+ return "r10";
+ case PERF_REG_POWERPC_R11:
+ return "r11";
+ case PERF_REG_POWERPC_R12:
+ return "r12";
+ case PERF_REG_POWERPC_R13:
+ return "r13";
+ case PERF_REG_POWERPC_R14:
+ return "r14";
+ case PERF_REG_POWERPC_R15:
+ return "r15";
+ case PERF_REG_POWERPC_R16:
+ return "r16";
+ case PERF_REG_POWERPC_R17:
+ return "r17";
+ case PERF_REG_POWERPC_R18:
+ return "r18";
+ case PERF_REG_POWERPC_R19:
+ return "r19";
+ case PERF_REG_POWERPC_R20:
+ return "r20";
+ case PERF_REG_POWERPC_R21:
+ return "r21";
+ case PERF_REG_POWERPC_R22:
+ return "r22";
+ case PERF_REG_POWERPC_R23:
+ return "r23";
+ case PERF_REG_POWERPC_R24:
+ return "r24";
+ case PERF_REG_POWERPC_R25:
+ return "r25";
+ case PERF_REG_POWERPC_R26:
+ return "r26";
+ case PERF_REG_POWERPC_R27:
+ return "r27";
+ case PERF_REG_POWERPC_R28:
+ return "r28";
+ case PERF_REG_POWERPC_R29:
+ return "r29";
+ case PERF_REG_POWERPC_R30:
+ return "r30";
+ case PERF_REG_POWERPC_R31:
+ return "r31";
+ case PERF_REG_POWERPC_NIP:
+ return "nip";
+ case PERF_REG_POWERPC_MSR:
+ return "msr";
+ case PERF_REG_POWERPC_ORIG_R3:
+ return "orig_r3";
+ case PERF_REG_POWERPC_CTR:
+ return "ctr";
+ case PERF_REG_POWERPC_LINK:
+ return "link";
+ case PERF_REG_POWERPC_XER:
+ return "xer";
+ case PERF_REG_POWERPC_CCR:
+ return "ccr";
+ case PERF_REG_POWERPC_SOFTE:
+ return "softe";
+ case PERF_REG_POWERPC_TRAP:
+ return "trap";
+ case PERF_REG_POWERPC_DAR:
+ return "dar";
+ case PERF_REG_POWERPC_DSISR:
+ return "dsisr";
+ case PERF_REG_POWERPC_SIER:
+ return "sier";
+ case PERF_REG_POWERPC_MMCRA:
+ return "mmcra";
+ case PERF_REG_POWERPC_MMCR0:
+ return "mmcr0";
+ case PERF_REG_POWERPC_MMCR1:
+ return "mmcr1";
+ case PERF_REG_POWERPC_MMCR2:
+ return "mmcr2";
+ case PERF_REG_POWERPC_MMCR3:
+ return "mmcr3";
+ case PERF_REG_POWERPC_SIER2:
+ return "sier2";
+ case PERF_REG_POWERPC_SIER3:
+ return "sier3";
+ case PERF_REG_POWERPC_PMC1:
+ return "pmc1";
+ case PERF_REG_POWERPC_PMC2:
+ return "pmc2";
+ case PERF_REG_POWERPC_PMC3:
+ return "pmc3";
+ case PERF_REG_POWERPC_PMC4:
+ return "pmc4";
+ case PERF_REG_POWERPC_PMC5:
+ return "pmc5";
+ case PERF_REG_POWERPC_PMC6:
+ return "pmc6";
+ case PERF_REG_POWERPC_SDAR:
+ return "sdar";
+ case PERF_REG_POWERPC_SIAR:
+ return "siar";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+uint64_t __perf_reg_ip_powerpc(void)
+{
+ return PERF_REG_POWERPC_NIP;
+}
+
+uint64_t __perf_reg_sp_powerpc(void)
+{
+ return PERF_REG_POWERPC_R1;
+}
+
+#endif
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_riscv.c b/tools/perf/util/perf-regs-arch/perf_regs_riscv.c
new file mode 100644
index 000000000000..e432630be4c5
--- /dev/null
+++ b/tools/perf/util/perf-regs-arch/perf_regs_riscv.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifdef HAVE_PERF_REGS_SUPPORT
+
+#include "../perf_regs.h"
+#include "../../../arch/riscv/include/uapi/asm/perf_regs.h"
+
+const char *__perf_reg_name_riscv(int id)
+{
+ switch (id) {
+ case PERF_REG_RISCV_PC:
+ return "pc";
+ case PERF_REG_RISCV_RA:
+ return "ra";
+ case PERF_REG_RISCV_SP:
+ return "sp";
+ case PERF_REG_RISCV_GP:
+ return "gp";
+ case PERF_REG_RISCV_TP:
+ return "tp";
+ case PERF_REG_RISCV_T0:
+ return "t0";
+ case PERF_REG_RISCV_T1:
+ return "t1";
+ case PERF_REG_RISCV_T2:
+ return "t2";
+ case PERF_REG_RISCV_S0:
+ return "s0";
+ case PERF_REG_RISCV_S1:
+ return "s1";
+ case PERF_REG_RISCV_A0:
+ return "a0";
+ case PERF_REG_RISCV_A1:
+ return "a1";
+ case PERF_REG_RISCV_A2:
+ return "a2";
+ case PERF_REG_RISCV_A3:
+ return "a3";
+ case PERF_REG_RISCV_A4:
+ return "a4";
+ case PERF_REG_RISCV_A5:
+ return "a5";
+ case PERF_REG_RISCV_A6:
+ return "a6";
+ case PERF_REG_RISCV_A7:
+ return "a7";
+ case PERF_REG_RISCV_S2:
+ return "s2";
+ case PERF_REG_RISCV_S3:
+ return "s3";
+ case PERF_REG_RISCV_S4:
+ return "s4";
+ case PERF_REG_RISCV_S5:
+ return "s5";
+ case PERF_REG_RISCV_S6:
+ return "s6";
+ case PERF_REG_RISCV_S7:
+ return "s7";
+ case PERF_REG_RISCV_S8:
+ return "s8";
+ case PERF_REG_RISCV_S9:
+ return "s9";
+ case PERF_REG_RISCV_S10:
+ return "s10";
+ case PERF_REG_RISCV_S11:
+ return "s11";
+ case PERF_REG_RISCV_T3:
+ return "t3";
+ case PERF_REG_RISCV_T4:
+ return "t4";
+ case PERF_REG_RISCV_T5:
+ return "t5";
+ case PERF_REG_RISCV_T6:
+ return "t6";
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+uint64_t __perf_reg_ip_riscv(void)
+{
+ return PERF_REG_RISCV_PC;
+}
+
+uint64_t __perf_reg_sp_riscv(void)
+{
+ return PERF_REG_RISCV_SP;
+}
+
+#endif
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_s390.c b/tools/perf/util/perf-regs-arch/perf_regs_s390.c
new file mode 100644
index 000000000000..1c7a46db778c
--- /dev/null
+++ b/tools/perf/util/perf-regs-arch/perf_regs_s390.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifdef HAVE_PERF_REGS_SUPPORT
+
+#include "../perf_regs.h"
+#include "../../../arch/s390/include/uapi/asm/perf_regs.h"
+
+const char *__perf_reg_name_s390(int id)
+{
+ switch (id) {
+ case PERF_REG_S390_R0:
+ return "R0";
+ case PERF_REG_S390_R1:
+ return "R1";
+ case PERF_REG_S390_R2:
+ return "R2";
+ case PERF_REG_S390_R3:
+ return "R3";
+ case PERF_REG_S390_R4:
+ return "R4";
+ case PERF_REG_S390_R5:
+ return "R5";
+ case PERF_REG_S390_R6:
+ return "R6";
+ case PERF_REG_S390_R7:
+ return "R7";
+ case PERF_REG_S390_R8:
+ return "R8";
+ case PERF_REG_S390_R9:
+ return "R9";
+ case PERF_REG_S390_R10:
+ return "R10";
+ case PERF_REG_S390_R11:
+ return "R11";
+ case PERF_REG_S390_R12:
+ return "R12";
+ case PERF_REG_S390_R13:
+ return "R13";
+ case PERF_REG_S390_R14:
+ return "R14";
+ case PERF_REG_S390_R15:
+ return "R15";
+ case PERF_REG_S390_FP0:
+ return "FP0";
+ case PERF_REG_S390_FP1:
+ return "FP1";
+ case PERF_REG_S390_FP2:
+ return "FP2";
+ case PERF_REG_S390_FP3:
+ return "FP3";
+ case PERF_REG_S390_FP4:
+ return "FP4";
+ case PERF_REG_S390_FP5:
+ return "FP5";
+ case PERF_REG_S390_FP6:
+ return "FP6";
+ case PERF_REG_S390_FP7:
+ return "FP7";
+ case PERF_REG_S390_FP8:
+ return "FP8";
+ case PERF_REG_S390_FP9:
+ return "FP9";
+ case PERF_REG_S390_FP10:
+ return "FP10";
+ case PERF_REG_S390_FP11:
+ return "FP11";
+ case PERF_REG_S390_FP12:
+ return "FP12";
+ case PERF_REG_S390_FP13:
+ return "FP13";
+ case PERF_REG_S390_FP14:
+ return "FP14";
+ case PERF_REG_S390_FP15:
+ return "FP15";
+ case PERF_REG_S390_MASK:
+ return "MASK";
+ case PERF_REG_S390_PC:
+ return "PC";
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+uint64_t __perf_reg_ip_s390(void)
+{
+ return PERF_REG_S390_PC;
+}
+
+uint64_t __perf_reg_sp_s390(void)
+{
+ return PERF_REG_S390_R15;
+}
+
+#endif
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_x86.c b/tools/perf/util/perf-regs-arch/perf_regs_x86.c
new file mode 100644
index 000000000000..873c620f0634
--- /dev/null
+++ b/tools/perf/util/perf-regs-arch/perf_regs_x86.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifdef HAVE_PERF_REGS_SUPPORT
+
+#include "../perf_regs.h"
+#include "../../../arch/x86/include/uapi/asm/perf_regs.h"
+
+const char *__perf_reg_name_x86(int id)
+{
+ switch (id) {
+ case PERF_REG_X86_AX:
+ return "AX";
+ case PERF_REG_X86_BX:
+ return "BX";
+ case PERF_REG_X86_CX:
+ return "CX";
+ case PERF_REG_X86_DX:
+ return "DX";
+ case PERF_REG_X86_SI:
+ return "SI";
+ case PERF_REG_X86_DI:
+ return "DI";
+ case PERF_REG_X86_BP:
+ return "BP";
+ case PERF_REG_X86_SP:
+ return "SP";
+ case PERF_REG_X86_IP:
+ return "IP";
+ case PERF_REG_X86_FLAGS:
+ return "FLAGS";
+ case PERF_REG_X86_CS:
+ return "CS";
+ case PERF_REG_X86_SS:
+ return "SS";
+ case PERF_REG_X86_DS:
+ return "DS";
+ case PERF_REG_X86_ES:
+ return "ES";
+ case PERF_REG_X86_FS:
+ return "FS";
+ case PERF_REG_X86_GS:
+ return "GS";
+ case PERF_REG_X86_R8:
+ return "R8";
+ case PERF_REG_X86_R9:
+ return "R9";
+ case PERF_REG_X86_R10:
+ return "R10";
+ case PERF_REG_X86_R11:
+ return "R11";
+ case PERF_REG_X86_R12:
+ return "R12";
+ case PERF_REG_X86_R13:
+ return "R13";
+ case PERF_REG_X86_R14:
+ return "R14";
+ case PERF_REG_X86_R15:
+ return "R15";
+
+#define XMM(x) \
+ case PERF_REG_X86_XMM ## x: \
+ case PERF_REG_X86_XMM ## x + 1: \
+ return "XMM" #x;
+ XMM(0)
+ XMM(1)
+ XMM(2)
+ XMM(3)
+ XMM(4)
+ XMM(5)
+ XMM(6)
+ XMM(7)
+ XMM(8)
+ XMM(9)
+ XMM(10)
+ XMM(11)
+ XMM(12)
+ XMM(13)
+ XMM(14)
+ XMM(15)
+#undef XMM
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+uint64_t __perf_reg_ip_x86(void)
+{
+ return PERF_REG_X86_IP;
+}
+
+uint64_t __perf_reg_sp_x86(void)
+{
+ return PERF_REG_X86_SP;
+}
+
+#endif
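
One detail worth calling out in the x86 file is the XMM(x) macro: each XMM register appears to occupy two consecutive ids in the uapi enum (one per 64-bit half of the 128-bit register), so the macro emits two case labels that fall through to a single name. A self-contained toy showing the same fall-through trick, with the enum values invented purely for illustration:

    #include <stdio.h>

    /* Toy enum: each XMM register takes two consecutive slots, mirroring the
     * layout assumed by the XMM(x) macro in perf_regs_x86.c. */
    enum toy_reg { TOY_XMM0 = 0, TOY_XMM0_HI, TOY_XMM1, TOY_XMM1_HI };

    static const char *toy_reg_name(int id)
    {
    	switch (id) {
    #define XMM(x) \
    	case TOY_XMM##x: \
    	case TOY_XMM##x + 1: \
    		return "XMM" #x;
    	XMM(0)
    	XMM(1)
    #undef XMM
    	default:
    		return NULL;
    	}
    }

    int main(void)
    {
    	/* Both halves of XMM0 resolve to the same printable name. */
    	printf("%s %s\n", toy_reg_name(TOY_XMM0), toy_reg_name(TOY_XMM0_HI));
    	return 0;
    }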
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 9bdbaa37f813..e2275856b570 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -3,6 +3,7 @@
#include <string.h>
#include "perf_regs.h"
#include "util/sample.h"
+#include "debug.h"
int __weak arch_sdt_arg_parse_op(char *old_op __maybe_unused,
char **new_op __maybe_unused)
@@ -12,732 +13,16 @@ int __weak arch_sdt_arg_parse_op(char *old_op __maybe_unused,
uint64_t __weak arch__intr_reg_mask(void)
{
- return PERF_REGS_MASK;
+ return 0;
}
uint64_t __weak arch__user_reg_mask(void)
{
- return PERF_REGS_MASK;
+ return 0;
}
#ifdef HAVE_PERF_REGS_SUPPORT
-#define perf_event_arm_regs perf_event_arm64_regs
-#include "../../arch/arm64/include/uapi/asm/perf_regs.h"
-#undef perf_event_arm_regs
-
-#include "../../arch/arm/include/uapi/asm/perf_regs.h"
-#include "../../arch/csky/include/uapi/asm/perf_regs.h"
-#include "../../arch/loongarch/include/uapi/asm/perf_regs.h"
-#include "../../arch/mips/include/uapi/asm/perf_regs.h"
-#include "../../arch/powerpc/include/uapi/asm/perf_regs.h"
-#include "../../arch/riscv/include/uapi/asm/perf_regs.h"
-#include "../../arch/s390/include/uapi/asm/perf_regs.h"
-#include "../../arch/x86/include/uapi/asm/perf_regs.h"
-
-static const char *__perf_reg_name_arm64(int id)
-{
- switch (id) {
- case PERF_REG_ARM64_X0:
- return "x0";
- case PERF_REG_ARM64_X1:
- return "x1";
- case PERF_REG_ARM64_X2:
- return "x2";
- case PERF_REG_ARM64_X3:
- return "x3";
- case PERF_REG_ARM64_X4:
- return "x4";
- case PERF_REG_ARM64_X5:
- return "x5";
- case PERF_REG_ARM64_X6:
- return "x6";
- case PERF_REG_ARM64_X7:
- return "x7";
- case PERF_REG_ARM64_X8:
- return "x8";
- case PERF_REG_ARM64_X9:
- return "x9";
- case PERF_REG_ARM64_X10:
- return "x10";
- case PERF_REG_ARM64_X11:
- return "x11";
- case PERF_REG_ARM64_X12:
- return "x12";
- case PERF_REG_ARM64_X13:
- return "x13";
- case PERF_REG_ARM64_X14:
- return "x14";
- case PERF_REG_ARM64_X15:
- return "x15";
- case PERF_REG_ARM64_X16:
- return "x16";
- case PERF_REG_ARM64_X17:
- return "x17";
- case PERF_REG_ARM64_X18:
- return "x18";
- case PERF_REG_ARM64_X19:
- return "x19";
- case PERF_REG_ARM64_X20:
- return "x20";
- case PERF_REG_ARM64_X21:
- return "x21";
- case PERF_REG_ARM64_X22:
- return "x22";
- case PERF_REG_ARM64_X23:
- return "x23";
- case PERF_REG_ARM64_X24:
- return "x24";
- case PERF_REG_ARM64_X25:
- return "x25";
- case PERF_REG_ARM64_X26:
- return "x26";
- case PERF_REG_ARM64_X27:
- return "x27";
- case PERF_REG_ARM64_X28:
- return "x28";
- case PERF_REG_ARM64_X29:
- return "x29";
- case PERF_REG_ARM64_SP:
- return "sp";
- case PERF_REG_ARM64_LR:
- return "lr";
- case PERF_REG_ARM64_PC:
- return "pc";
- case PERF_REG_ARM64_VG:
- return "vg";
- default:
- return NULL;
- }
-
- return NULL;
-}
-
-static const char *__perf_reg_name_arm(int id)
-{
- switch (id) {
- case PERF_REG_ARM_R0:
- return "r0";
- case PERF_REG_ARM_R1:
- return "r1";
- case PERF_REG_ARM_R2:
- return "r2";
- case PERF_REG_ARM_R3:
- return "r3";
- case PERF_REG_ARM_R4:
- return "r4";
- case PERF_REG_ARM_R5:
- return "r5";
- case PERF_REG_ARM_R6:
- return "r6";
- case PERF_REG_ARM_R7:
- return "r7";
- case PERF_REG_ARM_R8:
- return "r8";
- case PERF_REG_ARM_R9:
- return "r9";
- case PERF_REG_ARM_R10:
- return "r10";
- case PERF_REG_ARM_FP:
- return "fp";
- case PERF_REG_ARM_IP:
- return "ip";
- case PERF_REG_ARM_SP:
- return "sp";
- case PERF_REG_ARM_LR:
- return "lr";
- case PERF_REG_ARM_PC:
- return "pc";
- default:
- return NULL;
- }
-
- return NULL;
-}
-
-static const char *__perf_reg_name_csky(int id)
-{
- switch (id) {
- case PERF_REG_CSKY_A0:
- return "a0";
- case PERF_REG_CSKY_A1:
- return "a1";
- case PERF_REG_CSKY_A2:
- return "a2";
- case PERF_REG_CSKY_A3:
- return "a3";
- case PERF_REG_CSKY_REGS0:
- return "regs0";
- case PERF_REG_CSKY_REGS1:
- return "regs1";
- case PERF_REG_CSKY_REGS2:
- return "regs2";
- case PERF_REG_CSKY_REGS3:
- return "regs3";
- case PERF_REG_CSKY_REGS4:
- return "regs4";
- case PERF_REG_CSKY_REGS5:
- return "regs5";
- case PERF_REG_CSKY_REGS6:
- return "regs6";
- case PERF_REG_CSKY_REGS7:
- return "regs7";
- case PERF_REG_CSKY_REGS8:
- return "regs8";
- case PERF_REG_CSKY_REGS9:
- return "regs9";
- case PERF_REG_CSKY_SP:
- return "sp";
- case PERF_REG_CSKY_LR:
- return "lr";
- case PERF_REG_CSKY_PC:
- return "pc";
-#if defined(__CSKYABIV2__)
- case PERF_REG_CSKY_EXREGS0:
- return "exregs0";
- case PERF_REG_CSKY_EXREGS1:
- return "exregs1";
- case PERF_REG_CSKY_EXREGS2:
- return "exregs2";
- case PERF_REG_CSKY_EXREGS3:
- return "exregs3";
- case PERF_REG_CSKY_EXREGS4:
- return "exregs4";
- case PERF_REG_CSKY_EXREGS5:
- return "exregs5";
- case PERF_REG_CSKY_EXREGS6:
- return "exregs6";
- case PERF_REG_CSKY_EXREGS7:
- return "exregs7";
- case PERF_REG_CSKY_EXREGS8:
- return "exregs8";
- case PERF_REG_CSKY_EXREGS9:
- return "exregs9";
- case PERF_REG_CSKY_EXREGS10:
- return "exregs10";
- case PERF_REG_CSKY_EXREGS11:
- return "exregs11";
- case PERF_REG_CSKY_EXREGS12:
- return "exregs12";
- case PERF_REG_CSKY_EXREGS13:
- return "exregs13";
- case PERF_REG_CSKY_EXREGS14:
- return "exregs14";
- case PERF_REG_CSKY_TLS:
- return "tls";
- case PERF_REG_CSKY_HI:
- return "hi";
- case PERF_REG_CSKY_LO:
- return "lo";
-#endif
- default:
- return NULL;
- }
-
- return NULL;
-}
-
-static inline const char *__perf_reg_name_loongarch(int id)
-{
- switch (id) {
- case PERF_REG_LOONGARCH_PC:
- return "PC";
- case PERF_REG_LOONGARCH_R1:
- return "%r1";
- case PERF_REG_LOONGARCH_R2:
- return "%r2";
- case PERF_REG_LOONGARCH_R3:
- return "%r3";
- case PERF_REG_LOONGARCH_R4:
- return "%r4";
- case PERF_REG_LOONGARCH_R5:
- return "%r5";
- case PERF_REG_LOONGARCH_R6:
- return "%r6";
- case PERF_REG_LOONGARCH_R7:
- return "%r7";
- case PERF_REG_LOONGARCH_R8:
- return "%r8";
- case PERF_REG_LOONGARCH_R9:
- return "%r9";
- case PERF_REG_LOONGARCH_R10:
- return "%r10";
- case PERF_REG_LOONGARCH_R11:
- return "%r11";
- case PERF_REG_LOONGARCH_R12:
- return "%r12";
- case PERF_REG_LOONGARCH_R13:
- return "%r13";
- case PERF_REG_LOONGARCH_R14:
- return "%r14";
- case PERF_REG_LOONGARCH_R15:
- return "%r15";
- case PERF_REG_LOONGARCH_R16:
- return "%r16";
- case PERF_REG_LOONGARCH_R17:
- return "%r17";
- case PERF_REG_LOONGARCH_R18:
- return "%r18";
- case PERF_REG_LOONGARCH_R19:
- return "%r19";
- case PERF_REG_LOONGARCH_R20:
- return "%r20";
- case PERF_REG_LOONGARCH_R21:
- return "%r21";
- case PERF_REG_LOONGARCH_R22:
- return "%r22";
- case PERF_REG_LOONGARCH_R23:
- return "%r23";
- case PERF_REG_LOONGARCH_R24:
- return "%r24";
- case PERF_REG_LOONGARCH_R25:
- return "%r25";
- case PERF_REG_LOONGARCH_R26:
- return "%r26";
- case PERF_REG_LOONGARCH_R27:
- return "%r27";
- case PERF_REG_LOONGARCH_R28:
- return "%r28";
- case PERF_REG_LOONGARCH_R29:
- return "%r29";
- case PERF_REG_LOONGARCH_R30:
- return "%r30";
- case PERF_REG_LOONGARCH_R31:
- return "%r31";
- default:
- break;
- }
- return NULL;
-}
-
-static const char *__perf_reg_name_mips(int id)
-{
- switch (id) {
- case PERF_REG_MIPS_PC:
- return "PC";
- case PERF_REG_MIPS_R1:
- return "$1";
- case PERF_REG_MIPS_R2:
- return "$2";
- case PERF_REG_MIPS_R3:
- return "$3";
- case PERF_REG_MIPS_R4:
- return "$4";
- case PERF_REG_MIPS_R5:
- return "$5";
- case PERF_REG_MIPS_R6:
- return "$6";
- case PERF_REG_MIPS_R7:
- return "$7";
- case PERF_REG_MIPS_R8:
- return "$8";
- case PERF_REG_MIPS_R9:
- return "$9";
- case PERF_REG_MIPS_R10:
- return "$10";
- case PERF_REG_MIPS_R11:
- return "$11";
- case PERF_REG_MIPS_R12:
- return "$12";
- case PERF_REG_MIPS_R13:
- return "$13";
- case PERF_REG_MIPS_R14:
- return "$14";
- case PERF_REG_MIPS_R15:
- return "$15";
- case PERF_REG_MIPS_R16:
- return "$16";
- case PERF_REG_MIPS_R17:
- return "$17";
- case PERF_REG_MIPS_R18:
- return "$18";
- case PERF_REG_MIPS_R19:
- return "$19";
- case PERF_REG_MIPS_R20:
- return "$20";
- case PERF_REG_MIPS_R21:
- return "$21";
- case PERF_REG_MIPS_R22:
- return "$22";
- case PERF_REG_MIPS_R23:
- return "$23";
- case PERF_REG_MIPS_R24:
- return "$24";
- case PERF_REG_MIPS_R25:
- return "$25";
- case PERF_REG_MIPS_R28:
- return "$28";
- case PERF_REG_MIPS_R29:
- return "$29";
- case PERF_REG_MIPS_R30:
- return "$30";
- case PERF_REG_MIPS_R31:
- return "$31";
- default:
- break;
- }
- return NULL;
-}
-
-static const char *__perf_reg_name_powerpc(int id)
-{
- switch (id) {
- case PERF_REG_POWERPC_R0:
- return "r0";
- case PERF_REG_POWERPC_R1:
- return "r1";
- case PERF_REG_POWERPC_R2:
- return "r2";
- case PERF_REG_POWERPC_R3:
- return "r3";
- case PERF_REG_POWERPC_R4:
- return "r4";
- case PERF_REG_POWERPC_R5:
- return "r5";
- case PERF_REG_POWERPC_R6:
- return "r6";
- case PERF_REG_POWERPC_R7:
- return "r7";
- case PERF_REG_POWERPC_R8:
- return "r8";
- case PERF_REG_POWERPC_R9:
- return "r9";
- case PERF_REG_POWERPC_R10:
- return "r10";
- case PERF_REG_POWERPC_R11:
- return "r11";
- case PERF_REG_POWERPC_R12:
- return "r12";
- case PERF_REG_POWERPC_R13:
- return "r13";
- case PERF_REG_POWERPC_R14:
- return "r14";
- case PERF_REG_POWERPC_R15:
- return "r15";
- case PERF_REG_POWERPC_R16:
- return "r16";
- case PERF_REG_POWERPC_R17:
- return "r17";
- case PERF_REG_POWERPC_R18:
- return "r18";
- case PERF_REG_POWERPC_R19:
- return "r19";
- case PERF_REG_POWERPC_R20:
- return "r20";
- case PERF_REG_POWERPC_R21:
- return "r21";
- case PERF_REG_POWERPC_R22:
- return "r22";
- case PERF_REG_POWERPC_R23:
- return "r23";
- case PERF_REG_POWERPC_R24:
- return "r24";
- case PERF_REG_POWERPC_R25:
- return "r25";
- case PERF_REG_POWERPC_R26:
- return "r26";
- case PERF_REG_POWERPC_R27:
- return "r27";
- case PERF_REG_POWERPC_R28:
- return "r28";
- case PERF_REG_POWERPC_R29:
- return "r29";
- case PERF_REG_POWERPC_R30:
- return "r30";
- case PERF_REG_POWERPC_R31:
- return "r31";
- case PERF_REG_POWERPC_NIP:
- return "nip";
- case PERF_REG_POWERPC_MSR:
- return "msr";
- case PERF_REG_POWERPC_ORIG_R3:
- return "orig_r3";
- case PERF_REG_POWERPC_CTR:
- return "ctr";
- case PERF_REG_POWERPC_LINK:
- return "link";
- case PERF_REG_POWERPC_XER:
- return "xer";
- case PERF_REG_POWERPC_CCR:
- return "ccr";
- case PERF_REG_POWERPC_SOFTE:
- return "softe";
- case PERF_REG_POWERPC_TRAP:
- return "trap";
- case PERF_REG_POWERPC_DAR:
- return "dar";
- case PERF_REG_POWERPC_DSISR:
- return "dsisr";
- case PERF_REG_POWERPC_SIER:
- return "sier";
- case PERF_REG_POWERPC_MMCRA:
- return "mmcra";
- case PERF_REG_POWERPC_MMCR0:
- return "mmcr0";
- case PERF_REG_POWERPC_MMCR1:
- return "mmcr1";
- case PERF_REG_POWERPC_MMCR2:
- return "mmcr2";
- case PERF_REG_POWERPC_MMCR3:
- return "mmcr3";
- case PERF_REG_POWERPC_SIER2:
- return "sier2";
- case PERF_REG_POWERPC_SIER3:
- return "sier3";
- case PERF_REG_POWERPC_PMC1:
- return "pmc1";
- case PERF_REG_POWERPC_PMC2:
- return "pmc2";
- case PERF_REG_POWERPC_PMC3:
- return "pmc3";
- case PERF_REG_POWERPC_PMC4:
- return "pmc4";
- case PERF_REG_POWERPC_PMC5:
- return "pmc5";
- case PERF_REG_POWERPC_PMC6:
- return "pmc6";
- case PERF_REG_POWERPC_SDAR:
- return "sdar";
- case PERF_REG_POWERPC_SIAR:
- return "siar";
- default:
- break;
- }
- return NULL;
-}
-
-static const char *__perf_reg_name_riscv(int id)
-{
- switch (id) {
- case PERF_REG_RISCV_PC:
- return "pc";
- case PERF_REG_RISCV_RA:
- return "ra";
- case PERF_REG_RISCV_SP:
- return "sp";
- case PERF_REG_RISCV_GP:
- return "gp";
- case PERF_REG_RISCV_TP:
- return "tp";
- case PERF_REG_RISCV_T0:
- return "t0";
- case PERF_REG_RISCV_T1:
- return "t1";
- case PERF_REG_RISCV_T2:
- return "t2";
- case PERF_REG_RISCV_S0:
- return "s0";
- case PERF_REG_RISCV_S1:
- return "s1";
- case PERF_REG_RISCV_A0:
- return "a0";
- case PERF_REG_RISCV_A1:
- return "a1";
- case PERF_REG_RISCV_A2:
- return "a2";
- case PERF_REG_RISCV_A3:
- return "a3";
- case PERF_REG_RISCV_A4:
- return "a4";
- case PERF_REG_RISCV_A5:
- return "a5";
- case PERF_REG_RISCV_A6:
- return "a6";
- case PERF_REG_RISCV_A7:
- return "a7";
- case PERF_REG_RISCV_S2:
- return "s2";
- case PERF_REG_RISCV_S3:
- return "s3";
- case PERF_REG_RISCV_S4:
- return "s4";
- case PERF_REG_RISCV_S5:
- return "s5";
- case PERF_REG_RISCV_S6:
- return "s6";
- case PERF_REG_RISCV_S7:
- return "s7";
- case PERF_REG_RISCV_S8:
- return "s8";
- case PERF_REG_RISCV_S9:
- return "s9";
- case PERF_REG_RISCV_S10:
- return "s10";
- case PERF_REG_RISCV_S11:
- return "s11";
- case PERF_REG_RISCV_T3:
- return "t3";
- case PERF_REG_RISCV_T4:
- return "t4";
- case PERF_REG_RISCV_T5:
- return "t5";
- case PERF_REG_RISCV_T6:
- return "t6";
- default:
- return NULL;
- }
-
- return NULL;
-}
-
-static const char *__perf_reg_name_s390(int id)
-{
- switch (id) {
- case PERF_REG_S390_R0:
- return "R0";
- case PERF_REG_S390_R1:
- return "R1";
- case PERF_REG_S390_R2:
- return "R2";
- case PERF_REG_S390_R3:
- return "R3";
- case PERF_REG_S390_R4:
- return "R4";
- case PERF_REG_S390_R5:
- return "R5";
- case PERF_REG_S390_R6:
- return "R6";
- case PERF_REG_S390_R7:
- return "R7";
- case PERF_REG_S390_R8:
- return "R8";
- case PERF_REG_S390_R9:
- return "R9";
- case PERF_REG_S390_R10:
- return "R10";
- case PERF_REG_S390_R11:
- return "R11";
- case PERF_REG_S390_R12:
- return "R12";
- case PERF_REG_S390_R13:
- return "R13";
- case PERF_REG_S390_R14:
- return "R14";
- case PERF_REG_S390_R15:
- return "R15";
- case PERF_REG_S390_FP0:
- return "FP0";
- case PERF_REG_S390_FP1:
- return "FP1";
- case PERF_REG_S390_FP2:
- return "FP2";
- case PERF_REG_S390_FP3:
- return "FP3";
- case PERF_REG_S390_FP4:
- return "FP4";
- case PERF_REG_S390_FP5:
- return "FP5";
- case PERF_REG_S390_FP6:
- return "FP6";
- case PERF_REG_S390_FP7:
- return "FP7";
- case PERF_REG_S390_FP8:
- return "FP8";
- case PERF_REG_S390_FP9:
- return "FP9";
- case PERF_REG_S390_FP10:
- return "FP10";
- case PERF_REG_S390_FP11:
- return "FP11";
- case PERF_REG_S390_FP12:
- return "FP12";
- case PERF_REG_S390_FP13:
- return "FP13";
- case PERF_REG_S390_FP14:
- return "FP14";
- case PERF_REG_S390_FP15:
- return "FP15";
- case PERF_REG_S390_MASK:
- return "MASK";
- case PERF_REG_S390_PC:
- return "PC";
- default:
- return NULL;
- }
-
- return NULL;
-}
-
-static const char *__perf_reg_name_x86(int id)
-{
- switch (id) {
- case PERF_REG_X86_AX:
- return "AX";
- case PERF_REG_X86_BX:
- return "BX";
- case PERF_REG_X86_CX:
- return "CX";
- case PERF_REG_X86_DX:
- return "DX";
- case PERF_REG_X86_SI:
- return "SI";
- case PERF_REG_X86_DI:
- return "DI";
- case PERF_REG_X86_BP:
- return "BP";
- case PERF_REG_X86_SP:
- return "SP";
- case PERF_REG_X86_IP:
- return "IP";
- case PERF_REG_X86_FLAGS:
- return "FLAGS";
- case PERF_REG_X86_CS:
- return "CS";
- case PERF_REG_X86_SS:
- return "SS";
- case PERF_REG_X86_DS:
- return "DS";
- case PERF_REG_X86_ES:
- return "ES";
- case PERF_REG_X86_FS:
- return "FS";
- case PERF_REG_X86_GS:
- return "GS";
- case PERF_REG_X86_R8:
- return "R8";
- case PERF_REG_X86_R9:
- return "R9";
- case PERF_REG_X86_R10:
- return "R10";
- case PERF_REG_X86_R11:
- return "R11";
- case PERF_REG_X86_R12:
- return "R12";
- case PERF_REG_X86_R13:
- return "R13";
- case PERF_REG_X86_R14:
- return "R14";
- case PERF_REG_X86_R15:
- return "R15";
-
-#define XMM(x) \
- case PERF_REG_X86_XMM ## x: \
- case PERF_REG_X86_XMM ## x + 1: \
- return "XMM" #x;
- XMM(0)
- XMM(1)
- XMM(2)
- XMM(3)
- XMM(4)
- XMM(5)
- XMM(6)
- XMM(7)
- XMM(8)
- XMM(9)
- XMM(10)
- XMM(11)
- XMM(12)
- XMM(13)
- XMM(14)
- XMM(15)
-#undef XMM
- default:
- return NULL;
- }
-
- return NULL;
-}
-
const char *perf_reg_name(int id, const char *arch)
{
const char *reg_name = NULL;
@@ -790,4 +75,55 @@ out:
*valp = regs->cache_regs[id];
return 0;
}
+
+uint64_t perf_arch_reg_ip(const char *arch)
+{
+ if (!strcmp(arch, "arm"))
+ return __perf_reg_ip_arm();
+ else if (!strcmp(arch, "arm64"))
+ return __perf_reg_ip_arm64();
+ else if (!strcmp(arch, "csky"))
+ return __perf_reg_ip_csky();
+ else if (!strcmp(arch, "loongarch"))
+ return __perf_reg_ip_loongarch();
+ else if (!strcmp(arch, "mips"))
+ return __perf_reg_ip_mips();
+ else if (!strcmp(arch, "powerpc"))
+ return __perf_reg_ip_powerpc();
+ else if (!strcmp(arch, "riscv"))
+ return __perf_reg_ip_riscv();
+ else if (!strcmp(arch, "s390"))
+ return __perf_reg_ip_s390();
+ else if (!strcmp(arch, "x86"))
+ return __perf_reg_ip_x86();
+
+ pr_err("Fail to find IP register for arch %s, returns 0\n", arch);
+ return 0;
+}
+
+uint64_t perf_arch_reg_sp(const char *arch)
+{
+ if (!strcmp(arch, "arm"))
+ return __perf_reg_sp_arm();
+ else if (!strcmp(arch, "arm64"))
+ return __perf_reg_sp_arm64();
+ else if (!strcmp(arch, "csky"))
+ return __perf_reg_sp_csky();
+ else if (!strcmp(arch, "loongarch"))
+ return __perf_reg_sp_loongarch();
+ else if (!strcmp(arch, "mips"))
+ return __perf_reg_sp_mips();
+ else if (!strcmp(arch, "powerpc"))
+ return __perf_reg_sp_powerpc();
+ else if (!strcmp(arch, "riscv"))
+ return __perf_reg_sp_riscv();
+ else if (!strcmp(arch, "s390"))
+ return __perf_reg_sp_s390();
+ else if (!strcmp(arch, "x86"))
+ return __perf_reg_sp_x86();
+
+ pr_err("Fail to find SP register for arch %s, returns 0\n", arch);
+ return 0;
+}
+
#endif
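
With the per-arch tables split out into their own files, perf_regs.c is left with only the string dispatch: perf_arch_reg_ip() and perf_arch_reg_sp() map an architecture name to the enum id of its IP/SP register and fall back to 0 with an error message. A hedged sketch of how such a dispatcher is typically consumed to build a register sample mask at runtime (the helper name build_minimal_mask is an assumption, not taken from the patch):

    #include <stdint.h>

    /* Dispatchers added by this patch (declared in util/perf_regs.h). */
    uint64_t perf_arch_reg_ip(const char *arch);
    uint64_t perf_arch_reg_sp(const char *arch);

    /* Hypothetical helper: the minimal register mask needed to DWARF-unwind a
     * sample recorded on 'arch' - one bit for IP, one for SP. */
    static uint64_t build_minimal_mask(const char *arch)
    {
    	return (1ULL << perf_arch_reg_ip(arch)) |
    	       (1ULL << perf_arch_reg_sp(arch));
    }

This is the same expression the new DWARF_MINIMAL_REGS(arch) inline in perf_regs.h evaluates, as the next hunk shows.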
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index ce1127af05e4..ecd2a5362042 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -30,18 +30,49 @@ uint64_t arch__user_reg_mask(void);
#ifdef HAVE_PERF_REGS_SUPPORT
extern const struct sample_reg sample_reg_masks[];
-#include <perf_regs.h>
-
-#define DWARF_MINIMAL_REGS ((1ULL << PERF_REG_IP) | (1ULL << PERF_REG_SP))
-
const char *perf_reg_name(int id, const char *arch);
int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
+uint64_t perf_arch_reg_ip(const char *arch);
+uint64_t perf_arch_reg_sp(const char *arch);
+const char *__perf_reg_name_arm64(int id);
+uint64_t __perf_reg_ip_arm64(void);
+uint64_t __perf_reg_sp_arm64(void);
+const char *__perf_reg_name_arm(int id);
+uint64_t __perf_reg_ip_arm(void);
+uint64_t __perf_reg_sp_arm(void);
+const char *__perf_reg_name_csky(int id);
+uint64_t __perf_reg_ip_csky(void);
+uint64_t __perf_reg_sp_csky(void);
+const char *__perf_reg_name_loongarch(int id);
+uint64_t __perf_reg_ip_loongarch(void);
+uint64_t __perf_reg_sp_loongarch(void);
+const char *__perf_reg_name_mips(int id);
+uint64_t __perf_reg_ip_mips(void);
+uint64_t __perf_reg_sp_mips(void);
+const char *__perf_reg_name_powerpc(int id);
+uint64_t __perf_reg_ip_powerpc(void);
+uint64_t __perf_reg_sp_powerpc(void);
+const char *__perf_reg_name_riscv(int id);
+uint64_t __perf_reg_ip_riscv(void);
+uint64_t __perf_reg_sp_riscv(void);
+const char *__perf_reg_name_s390(int id);
+uint64_t __perf_reg_ip_s390(void);
+uint64_t __perf_reg_sp_s390(void);
+const char *__perf_reg_name_x86(int id);
+uint64_t __perf_reg_ip_x86(void);
+uint64_t __perf_reg_sp_x86(void);
+
+static inline uint64_t DWARF_MINIMAL_REGS(const char *arch)
+{
+ return (1ULL << perf_arch_reg_ip(arch)) | (1ULL << perf_arch_reg_sp(arch));
+}
#else
-#define PERF_REGS_MASK 0
-#define PERF_REGS_MAX 0
-#define DWARF_MINIMAL_REGS PERF_REGS_MASK
+static inline uint64_t DWARF_MINIMAL_REGS(const char *arch __maybe_unused)
+{
+ return 0;
+}
static inline const char *perf_reg_name(int id __maybe_unused, const char *arch __maybe_unused)
{
@@ -54,5 +85,16 @@ static inline int perf_reg_value(u64 *valp __maybe_unused,
{
return 0;
}
+
+static inline uint64_t perf_arch_reg_ip(const char *arch __maybe_unused)
+{
+ return 0;
+}
+
+static inline uint64_t perf_arch_reg_sp(const char *arch __maybe_unused)
+{
+ return 0;
+}
+
#endif /* HAVE_PERF_REGS_SUPPORT */
#endif /* __PERF_REGS_H */
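
The old DWARF_MINIMAL_REGS constant could be computed at compile time because perf_regs.h pulled in the host's <perf_regs.h>; with that include gone, the same mask is produced per sample architecture at runtime. As a small worked example, assuming the case order in perf_regs_x86.c above mirrors the uapi enum (i.e. PERF_REG_X86_SP == 7 and PERF_REG_X86_IP == 8), DWARF_MINIMAL_REGS("x86") would come out to 0x180:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* (1 << IP) | (1 << SP) for x86, under the enum-value assumption above. */
    	uint64_t mask = (1ULL << 8) | (1ULL << 7);

    	printf("DWARF_MINIMAL_REGS(\"x86\") ~ %#llx\n", (unsigned long long)mask); /* 0x180 */
    	return 0;
    }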
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 28380e7aa8d0..d85602aa4b9f 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -19,8 +19,8 @@
#include "evsel.h"
#include "pmu.h"
#include "pmus.h"
-#include "pmu-bison.h"
-#include "pmu-flex.h"
+#include <util/pmu-bison.h>
+#include <util/pmu-flex.h>
#include "parse-events.h"
#include "print-events.h"
#include "header.h"
@@ -29,7 +29,63 @@
#include "fncache.h"
#include "util/evsel_config.h"
-struct perf_pmu perf_pmu__fake;
+struct perf_pmu perf_pmu__fake = {
+ .name = "fake",
+};
+
+#define UNIT_MAX_LEN 31 /* max length for event unit name */
+
+/**
+ * struct perf_pmu_alias - An event alias either read from sysfs or built into
+ * pmu-events.c, which is generated by parsing the pmu-events json files.
+ */
+struct perf_pmu_alias {
+ /** @name: Name of the event like "mem-loads". */
+ char *name;
+ /** @desc: Optional short description of the event. */
+ char *desc;
+ /** @long_desc: Optional long description. */
+ char *long_desc;
+ /**
+ * @topic: Optional topic such as cache or pipeline, particularly for
+ * json events.
+ */
+ char *topic;
+ /** @terms: Owned list of the original parsed parameters. */
+ struct list_head terms;
+ /** @list: List element of struct perf_pmu aliases. */
+ struct list_head list;
+ /**
+ * @pmu_name: The name copied from the json struct pmu_event. This can
+ * differ from the PMU name as it won't have suffixes.
+ */
+ char *pmu_name;
+ /** @unit: Units for the event, such as bytes or cache lines. */
+ char unit[UNIT_MAX_LEN+1];
+ /** @scale: Value to scale read counter values by. */
+ double scale;
+ /**
+ * @per_pkg: Does the file
+ * <sysfs>/bus/event_source/devices/<pmu_name>/events/<name>.per-pkg or
+ * equivalent json value exist and have the value 1.
+ */
+ bool per_pkg;
+ /**
+ * @snapshot: Does the file
+ * <sysfs>/bus/event_source/devices/<pmu_name>/events/<name>.snapshot
+ * exist and have the value 1.
+ */
+ bool snapshot;
+ /**
+ * @deprecated: Is the event hidden and so not shown in perf list by
+ * default.
+ */
+ bool deprecated;
+ /** @from_sysfs: Was the alias from sysfs or a json event? */
+ bool from_sysfs;
+ /** @info_loaded: Have the scale, unit and other values been read from disk? */
+ bool info_loaded;
+};
/**
* struct perf_pmu_format - Values from a format file read from
@@ -40,6 +96,10 @@ struct perf_pmu perf_pmu__fake;
* value=PERF_PMU_FORMAT_VALUE_CONFIG and bits 0 to 7 will be set.
*/
struct perf_pmu_format {
+ /** @list: Element on list within struct perf_pmu. */
+ struct list_head list;
+ /** @bits: Which config bits are set by this format value. */
+ DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
/** @name: The modifier/file name. */
char *name;
/**
@@ -47,18 +107,81 @@ struct perf_pmu_format {
* are from PERF_PMU_FORMAT_VALUE_CONFIG to
* PERF_PMU_FORMAT_VALUE_CONFIG_END.
*/
- int value;
- /** @bits: Which config bits are set by this format value. */
- DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
- /** @list: Element on list within struct perf_pmu. */
- struct list_head list;
+ u16 value;
+ /** @loaded: Has the contents been loaded/parsed. */
+ bool loaded;
};
+static int pmu_aliases_parse(struct perf_pmu *pmu);
+
+static struct perf_pmu_format *perf_pmu__new_format(struct list_head *list, char *name)
+{
+ struct perf_pmu_format *format;
+
+ format = zalloc(sizeof(*format));
+ if (!format)
+ return NULL;
+
+ format->name = strdup(name);
+ if (!format->name) {
+ free(format);
+ return NULL;
+ }
+ list_add_tail(&format->list, list);
+ return format;
+}
+
+/* Called at the end of parsing a format. */
+void perf_pmu_format__set_value(void *vformat, int config, unsigned long *bits)
+{
+ struct perf_pmu_format *format = vformat;
+
+ format->value = config;
+ memcpy(format->bits, bits, sizeof(format->bits));
+}
+
+static void __perf_pmu_format__load(struct perf_pmu_format *format, FILE *file)
+{
+ void *scanner;
+ int ret;
+
+ ret = perf_pmu_lex_init(&scanner);
+ if (ret)
+ return;
+
+ perf_pmu_set_in(file, scanner);
+ ret = perf_pmu_parse(format, scanner);
+ perf_pmu_lex_destroy(scanner);
+ format->loaded = true;
+}
+
+static void perf_pmu_format__load(struct perf_pmu *pmu, struct perf_pmu_format *format)
+{
+ char path[PATH_MAX];
+ FILE *file = NULL;
+
+ if (format->loaded)
+ return;
+
+ if (!perf_pmu__pathname_scnprintf(path, sizeof(path), pmu->name, "format"))
+ return;
+
+ assert(strlen(path) + strlen(format->name) + 2 < sizeof(path));
+ strcat(path, "/");
+ strcat(path, format->name);
+
+ file = fopen(path, "r");
+ if (!file)
+ return;
+ __perf_pmu_format__load(format, file);
+ fclose(file);
+}
+
/*
* Parse & process all the sysfs attributes located under
* the directory specified in 'dir' parameter.
*/
-int perf_pmu__format_parse(int dirfd, struct list_head *head)
+int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load)
{
struct dirent *evt_ent;
DIR *format_dir;
@@ -68,37 +191,35 @@ int perf_pmu__format_parse(int dirfd, struct list_head *head)
if (!format_dir)
return -EINVAL;
- while (!ret && (evt_ent = readdir(format_dir))) {
+ while ((evt_ent = readdir(format_dir)) != NULL) {
+ struct perf_pmu_format *format;
char *name = evt_ent->d_name;
- int fd;
- void *scanner;
- FILE *file;
if (!strcmp(name, ".") || !strcmp(name, ".."))
continue;
-
- ret = -EINVAL;
- fd = openat(dirfd, name, O_RDONLY);
- if (fd < 0)
- break;
-
- file = fdopen(fd, "r");
- if (!file) {
- close(fd);
+ format = perf_pmu__new_format(&pmu->format, name);
+ if (!format) {
+ ret = -ENOMEM;
break;
}
- ret = perf_pmu_lex_init(&scanner);
- if (ret) {
+ if (eager_load) {
+ FILE *file;
+ int fd = openat(dirfd, name, O_RDONLY);
+
+ if (fd < 0) {
+ ret = -errno;
+ break;
+ }
+ file = fdopen(fd, "r");
+ if (!file) {
+ close(fd);
+ break;
+ }
+ __perf_pmu_format__load(format, file);
fclose(file);
- break;
}
-
- perf_pmu_set_in(file, scanner);
- ret = perf_pmu_parse(head, name, scanner);
- perf_pmu_lex_destroy(scanner);
- fclose(file);
}
closedir(format_dir);
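
The rewritten perf_pmu__format_parse() above now only records the file names found under .../format/ and defers the flex/bison parse to perf_pmu_format__load(), which runs the first time a given format is actually needed (or immediately when eager_load is set). The pattern is a "loaded" flag plus a load-on-first-use accessor; a minimal standalone sketch of that shape, with all names invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-in for struct perf_pmu_format: the name is registered cheaply
     * up front, the expensive parse is deferred until first use. */
    struct lazy_format {
    	char *name;
    	bool loaded;
    	int value;	/* filled in by the (expensive) parse */
    };

    static void lazy_format_load(struct lazy_format *f)
    {
    	if (f->loaded)
    		return;
    	/* Stand-in for opening the sysfs file and running the parser. */
    	f->value = (int)strlen(f->name);
    	f->loaded = true;
    }

    int main(void)
    {
    	struct lazy_format f = { .name = strdup("event"), .loaded = false };

    	lazy_format_load(&f);	/* parses on first use ... */
    	lazy_format_load(&f);	/* ... and is a no-op afterwards */
    	printf("%s -> %d\n", f.name, f.value);
    	free(f.name);
    	return 0;
    }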
@@ -110,7 +231,7 @@ int perf_pmu__format_parse(int dirfd, struct list_head *head)
* located at:
* /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
*/
-static int pmu_format(int dirfd, const char *name, struct list_head *format)
+static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name)
{
int fd;
@@ -119,7 +240,7 @@ static int pmu_format(int dirfd, const char *name, struct list_head *format)
return 0;
/* it'll close the fd */
- if (perf_pmu__format_parse(fd, format))
+ if (perf_pmu__format_parse(pmu, fd, /*eager_load=*/false))
return -1;
return 0;
@@ -162,17 +283,21 @@ out:
return ret;
}
-static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, int dirfd, char *name)
+static int perf_pmu__parse_scale(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
struct stat st;
ssize_t sret;
+ size_t len;
char scale[128];
int fd, ret = -1;
char path[PATH_MAX];
- scnprintf(path, PATH_MAX, "%s.scale", name);
+ len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
+ if (!len)
+ return 0;
+ scnprintf(path + len, sizeof(path) - len, "%s/%s.scale", pmu->name, alias->name);
- fd = openat(dirfd, path, O_RDONLY);
+ fd = open(path, O_RDONLY);
if (fd == -1)
return -1;
@@ -194,15 +319,20 @@ error:
return ret;
}
-static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, int dirfd, char *name)
+static int perf_pmu__parse_unit(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
char path[PATH_MAX];
+ size_t len;
ssize_t sret;
int fd;
- scnprintf(path, PATH_MAX, "%s.unit", name);
- fd = openat(dirfd, path, O_RDONLY);
+ len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
+ if (!len)
+ return 0;
+ scnprintf(path + len, sizeof(path) - len, "%s/%s.unit", pmu->name, alias->name);
+
+ fd = open(path, O_RDONLY);
if (fd == -1)
return -1;
@@ -225,14 +355,18 @@ error:
}
static int
-perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, int dirfd, char *name)
+perf_pmu__parse_per_pkg(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
char path[PATH_MAX];
+ size_t len;
int fd;
- scnprintf(path, PATH_MAX, "%s.per-pkg", name);
+ len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
+ if (!len)
+ return 0;
+ scnprintf(path + len, sizeof(path) - len, "%s/%s.per-pkg", pmu->name, alias->name);
- fd = openat(dirfd, path, O_RDONLY);
+ fd = open(path, O_RDONLY);
if (fd == -1)
return -1;
@@ -242,15 +376,18 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, int dirfd, char *name)
return 0;
}
-static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
- int dirfd, char *name)
+static int perf_pmu__parse_snapshot(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
char path[PATH_MAX];
+ size_t len;
int fd;
- scnprintf(path, PATH_MAX, "%s.snapshot", name);
+ len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
+ if (!len)
+ return 0;
+ scnprintf(path + len, sizeof(path) - len, "%s/%s.snapshot", pmu->name, alias->name);
- fd = openat(dirfd, path, O_RDONLY);
+ fd = open(path, O_RDONLY);
if (fd == -1)
return -1;
@@ -259,46 +396,13 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
return 0;
}
-static void perf_pmu_assign_str(char *name, const char *field, char **old_str,
- char **new_str)
-{
- if (!*old_str)
- goto set_new;
-
- if (*new_str) { /* Have new string, check with old */
- if (strcasecmp(*old_str, *new_str))
- pr_debug("alias %s differs in field '%s'\n",
- name, field);
- zfree(old_str);
- } else /* Nothing new --> keep old string */
- return;
-set_new:
- *old_str = *new_str;
- *new_str = NULL;
-}
-
-static void perf_pmu_update_alias(struct perf_pmu_alias *old,
- struct perf_pmu_alias *newalias)
-{
- perf_pmu_assign_str(old->name, "desc", &old->desc, &newalias->desc);
- perf_pmu_assign_str(old->name, "long_desc", &old->long_desc,
- &newalias->long_desc);
- perf_pmu_assign_str(old->name, "topic", &old->topic, &newalias->topic);
- perf_pmu_assign_str(old->name, "value", &old->str, &newalias->str);
- old->scale = newalias->scale;
- old->per_pkg = newalias->per_pkg;
- old->snapshot = newalias->snapshot;
- memcpy(old->unit, newalias->unit, sizeof(old->unit));
-}
-
/* Delete an alias entry. */
-void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
+static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
{
zfree(&newalias->name);
zfree(&newalias->desc);
zfree(&newalias->long_desc);
zfree(&newalias->topic);
- zfree(&newalias->str);
zfree(&newalias->pmu_name);
parse_events_terms__purge(&newalias->terms);
free(newalias);
@@ -314,38 +418,99 @@ static void perf_pmu__del_aliases(struct perf_pmu *pmu)
}
}
-/* Merge an alias, search in alias list. If this name is already
- * present merge both of them to combine all information.
- */
-static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
- struct list_head *alist)
+static struct perf_pmu_alias *perf_pmu__find_alias(struct perf_pmu *pmu,
+ const char *name,
+ bool load)
{
- struct perf_pmu_alias *a;
+ struct perf_pmu_alias *alias;
- list_for_each_entry(a, alist, list) {
- if (!strcasecmp(newalias->name, a->name)) {
- if (newalias->pmu_name && a->pmu_name &&
- !strcasecmp(newalias->pmu_name, a->pmu_name)) {
- continue;
- }
- perf_pmu_update_alias(a, newalias);
- perf_pmu_free_alias(newalias);
- return true;
- }
+ if (load && !pmu->sysfs_aliases_loaded)
+ pmu_aliases_parse(pmu);
+
+ list_for_each_entry(alias, &pmu->aliases, list) {
+ if (!strcasecmp(alias->name, name))
+ return alias;
}
- return false;
+ return NULL;
}
-static int __perf_pmu__new_alias(struct list_head *list, int dirfd, char *name,
- char *desc, char *val, const struct pmu_event *pe)
+static bool assign_str(const char *name, const char *field, char **old_str,
+ const char *new_str)
+{
+ if (!*old_str && new_str) {
+ *old_str = strdup(new_str);
+ return true;
+ }
+
+ if (!new_str || !strcasecmp(*old_str, new_str))
+ return false; /* Nothing to update. */
+
+ pr_debug("alias %s differs in field '%s' ('%s' != '%s')\n",
+ name, field, *old_str, new_str);
+ zfree(old_str);
+ *old_str = strdup(new_str);
+ return true;
+}
+
+static void read_alias_info(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
+{
+ if (!alias->from_sysfs || alias->info_loaded)
+ return;
+
+ /*
+ * load unit name and scale if available
+ */
+ perf_pmu__parse_unit(pmu, alias);
+ perf_pmu__parse_scale(pmu, alias);
+ perf_pmu__parse_per_pkg(pmu, alias);
+ perf_pmu__parse_snapshot(pmu, alias);
+}
+
+struct update_alias_data {
+ struct perf_pmu *pmu;
+ struct perf_pmu_alias *alias;
+};
+
+static int update_alias(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *vdata)
+{
+ struct update_alias_data *data = vdata;
+ int ret = 0;
+
+ read_alias_info(data->pmu, data->alias);
+ assign_str(pe->name, "desc", &data->alias->desc, pe->desc);
+ assign_str(pe->name, "long_desc", &data->alias->long_desc, pe->long_desc);
+ assign_str(pe->name, "topic", &data->alias->topic, pe->topic);
+ data->alias->per_pkg = pe->perpkg;
+ if (pe->event) {
+ parse_events_terms__purge(&data->alias->terms);
+ ret = parse_events_terms(&data->alias->terms, pe->event, /*input=*/NULL);
+ }
+ if (!ret && pe->unit) {
+ char *unit;
+
+ ret = perf_pmu__convert_scale(pe->unit, &unit, &data->alias->scale);
+ if (!ret)
+ snprintf(data->alias->unit, sizeof(data->alias->unit), "%s", unit);
+ }
+ return ret;
+}
+
+static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
+ const char *desc, const char *val, FILE *val_fd,
+ const struct pmu_event *pe)
{
- struct parse_events_term *term;
struct perf_pmu_alias *alias;
int ret;
- char newval[256];
const char *long_desc = NULL, *topic = NULL, *unit = NULL, *pmu_name = NULL;
bool deprecated = false, perpkg = false;
+ if (perf_pmu__find_alias(pmu, name, /*load=*/ false)) {
+ /* Alias was already created/loaded. */
+ return 0;
+ }
+
if (pe) {
long_desc = pe->long_desc;
topic = pe->topic;
@@ -366,80 +531,49 @@ static int __perf_pmu__new_alias(struct list_head *list, int dirfd, char *name,
alias->snapshot = false;
alias->deprecated = deprecated;
- ret = parse_events_terms(&alias->terms, val);
+ ret = parse_events_terms(&alias->terms, val, val_fd);
if (ret) {
pr_err("Cannot parse alias %s: %d\n", val, ret);
free(alias);
return ret;
}
- /* Scan event and remove leading zeroes, spaces, newlines, some
- * platforms have terms specified as
- * event=0x0091 (read from files ../<PMU>/events/<FILE>
- * and terms specified as event=0x91 (read from JSON files).
- *
- * Rebuild string to make alias->str member comparable.
- */
- memset(newval, 0, sizeof(newval));
- ret = 0;
- list_for_each_entry(term, &alias->terms, list) {
- if (ret)
- ret += scnprintf(newval + ret, sizeof(newval) - ret,
- ",");
- if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
- ret += scnprintf(newval + ret, sizeof(newval) - ret,
- "%s=%#x", term->config, term->val.num);
- else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
- ret += scnprintf(newval + ret, sizeof(newval) - ret,
- "%s=%s", term->config, term->val.str);
- }
-
alias->name = strdup(name);
- if (dirfd >= 0) {
- /*
- * load unit name and scale if available
- */
- perf_pmu__parse_unit(alias, dirfd, name);
- perf_pmu__parse_scale(alias, dirfd, name);
- perf_pmu__parse_per_pkg(alias, dirfd, name);
- perf_pmu__parse_snapshot(alias, dirfd, name);
- }
-
alias->desc = desc ? strdup(desc) : NULL;
alias->long_desc = long_desc ? strdup(long_desc) :
desc ? strdup(desc) : NULL;
alias->topic = topic ? strdup(topic) : NULL;
+ alias->pmu_name = pmu_name ? strdup(pmu_name) : NULL;
if (unit) {
- if (perf_pmu__convert_scale(unit, (char **)&unit, &alias->scale) < 0)
+ if (perf_pmu__convert_scale(unit, (char **)&unit, &alias->scale) < 0) {
+ perf_pmu_free_alias(alias);
return -1;
+ }
snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
}
- alias->str = strdup(newval);
- alias->pmu_name = pmu_name ? strdup(pmu_name) : NULL;
-
- if (!perf_pmu_merge_alias(alias, list))
- list_add_tail(&alias->list, list);
+ if (!pe) {
+ /* Update an event from sysfs with json data. */
+ struct update_alias_data data = {
+ .pmu = pmu,
+ .alias = alias,
+ };
+
+ alias->from_sysfs = true;
+ if (pmu->events_table) {
+ if (pmu_events_table__find_event(pmu->events_table, pmu, name,
+ update_alias, &data) == 0)
+ pmu->loaded_json_aliases++;
+ }
+ }
+ if (!pe)
+ pmu->sysfs_aliases++;
+ else
+ pmu->loaded_json_aliases++;
+ list_add_tail(&alias->list, &pmu->aliases);
return 0;
}
-static int perf_pmu__new_alias(struct list_head *list, int dirfd, char *name, FILE *file)
-{
- char buf[256];
- int ret;
-
- ret = fread(buf, 1, sizeof(buf), file);
- if (ret == 0)
- return -EINVAL;
-
- buf[ret] = 0;
-
- /* Remove trailing newline from sysfs file */
- strim(buf);
-
- return __perf_pmu__new_alias(list, dirfd, name, NULL, buf, NULL);
-}
-
static inline bool pmu_alias_info_file(char *name)
{
size_t len;
@@ -458,18 +592,33 @@ static inline bool pmu_alias_info_file(char *name)
}
/*
- * Process all the sysfs attributes located under the directory
- * specified in 'dir' parameter.
+ * Reading the pmu event aliases definition, which should be located at:
+ * /sys/bus/event_source/devices/<dev>/events as sysfs group attributes.
*/
-static int pmu_aliases_parse(int dirfd, struct list_head *head)
+static int pmu_aliases_parse(struct perf_pmu *pmu)
{
+ char path[PATH_MAX];
struct dirent *evt_ent;
DIR *event_dir;
- int fd;
+ size_t len;
+ int fd, dir_fd;
- event_dir = fdopendir(dirfd);
- if (!event_dir)
+ len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
+ if (!len)
+ return 0;
+ scnprintf(path + len, sizeof(path) - len, "%s/events", pmu->name);
+
+ dir_fd = open(path, O_DIRECTORY);
+ if (dir_fd == -1) {
+ pmu->sysfs_aliases_loaded = true;
+ return 0;
+ }
+
+ event_dir = fdopendir(dir_fd);
+ if (!event_dir) {
+ close(dir_fd);
return -EINVAL;
+ }
while ((evt_ent = readdir(event_dir))) {
char *name = evt_ent->d_name;
@@ -484,7 +633,7 @@ static int pmu_aliases_parse(int dirfd, struct list_head *head)
if (pmu_alias_info_file(name))
continue;
- fd = openat(dirfd, name, O_RDONLY);
+ fd = openat(dir_fd, name, O_RDONLY);
if (fd == -1) {
pr_debug("Cannot open %s\n", name);
continue;
@@ -495,31 +644,15 @@ static int pmu_aliases_parse(int dirfd, struct list_head *head)
continue;
}
- if (perf_pmu__new_alias(head, dirfd, name, file) < 0)
+ if (perf_pmu__new_alias(pmu, name, /*desc=*/ NULL,
+ /*val=*/ NULL, file, /*pe=*/ NULL) < 0)
pr_debug("Cannot set up %s\n", name);
fclose(file);
}
closedir(event_dir);
- return 0;
-}
-
-/*
- * Reading the pmu event aliases definition, which should be located at:
- * /sys/bus/event_source/devices/<dev>/events as sysfs group attributes.
- */
-static int pmu_aliases(int dirfd, const char *name, struct list_head *head)
-{
- int fd;
-
- fd = perf_pmu__pathname_fd(dirfd, name, "events", O_DIRECTORY);
- if (fd < 0)
- return 0;
-
- /* it'll close the fd */
- if (pmu_aliases_parse(fd, head))
- return -1;
-
+ close(dir_fd);
+ pmu->sysfs_aliases_loaded = true;
return 0;
}
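
Aliases get the same lazy treatment as formats: pmu_aliases_parse() is now invoked on demand, guarded by pmu->sysfs_aliases_loaded, and events that only exist in the JSON pmu-events tables are materialised individually when a lookup misses (see perf_pmu__find_alias(), pmu_find_alias() and perf_pmu__have_event() further down in this patch). A self-contained toy showing that two-tier lookup order, with every name and data item invented:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_ALIASES 8

    /* Toy PMU: a lazily populated "sysfs" list consulted first, with a
     * per-name fallback into a static "json" table on a miss. */
    struct toy_pmu {
    	const char *sysfs_names[MAX_ALIASES];
    	int nr_sysfs;
    	bool sysfs_loaded;
    };

    static void toy_load_sysfs(struct toy_pmu *pmu)
    {
    	if (pmu->sysfs_loaded)
    		return;
    	/* Stand-in for scanning .../events/ in sysfs. */
    	pmu->sysfs_names[pmu->nr_sysfs++] = "cycles";
    	pmu->sysfs_loaded = true;
    }

    static const char *toy_json_table[] = { "mem-loads", "mem-stores" };

    static bool toy_have_event(struct toy_pmu *pmu, const char *name)
    {
    	toy_load_sysfs(pmu);	/* lazy, like pmu_aliases_parse() */
    	for (int i = 0; i < pmu->nr_sysfs; i++)
    		if (!strcmp(pmu->sysfs_names[i], name))
    			return true;
    	/* fallback, like pmu_events_table__find_event() */
    	for (size_t i = 0; i < sizeof(toy_json_table) / sizeof(toy_json_table[0]); i++)
    		if (!strcmp(toy_json_table[i], name))
    			return true;
    	return false;
    }

    int main(void)
    {
    	struct toy_pmu pmu = { .nr_sysfs = 0, .sysfs_loaded = false };

    	printf("%d %d %d\n", toy_have_event(&pmu, "cycles"),
    	       toy_have_event(&pmu, "mem-loads"), toy_have_event(&pmu, "bogus"));
    	return 0;
    }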
@@ -741,28 +874,13 @@ out:
return res;
}
-struct pmu_add_cpu_aliases_map_data {
- /* List being added to. */
- struct list_head *head;
- /* If a pmu_event lacks a given PMU the default used. */
- char *default_pmu_name;
- /* The PMU that we're searching for events for. */
- struct perf_pmu *pmu;
-};
-
static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
const struct pmu_events_table *table __maybe_unused,
void *vdata)
{
- struct pmu_add_cpu_aliases_map_data *data = vdata;
- const char *pname = pe->pmu ?: data->default_pmu_name;
+ struct perf_pmu *pmu = vdata;
- if (!strcmp(pname, data->pmu->name) ||
- (data->pmu->is_uncore && pmu_uncore_alias_match(pname, data->pmu->name))) {
- /* need type casts to override 'const' */
- __perf_pmu__new_alias(data->head, -1, (char *)pe->name, (char *)pe->desc,
- (char *)pe->event, pe);
- }
+ perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, pe);
return 0;
}
@@ -770,68 +888,51 @@ static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
* From the pmu_events_table, find the events that correspond to the given
* PMU and add them to the list 'head'.
*/
-void pmu_add_cpu_aliases_table(struct list_head *head, struct perf_pmu *pmu,
- const struct pmu_events_table *table)
+void pmu_add_cpu_aliases_table(struct perf_pmu *pmu, const struct pmu_events_table *table)
{
- struct pmu_add_cpu_aliases_map_data data = {
- .head = head,
- .default_pmu_name = perf_pmus__default_pmu_name(),
- .pmu = pmu,
- };
-
- pmu_events_table_for_each_event(table, pmu_add_cpu_aliases_map_callback, &data);
- free(data.default_pmu_name);
+ pmu_events_table__for_each_event(table, pmu, pmu_add_cpu_aliases_map_callback, pmu);
}
-static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
+static void pmu_add_cpu_aliases(struct perf_pmu *pmu)
{
- const struct pmu_events_table *table;
+ if (!pmu->events_table)
+ return;
- table = perf_pmu__find_events_table(pmu);
- if (!table)
+ if (pmu->cpu_aliases_added)
return;
- pmu_add_cpu_aliases_table(head, pmu, table);
+ pmu_add_cpu_aliases_table(pmu, pmu->events_table);
+ pmu->cpu_aliases_added = true;
}
-struct pmu_sys_event_iter_data {
- struct list_head *head;
- struct perf_pmu *pmu;
-};
-
static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe,
const struct pmu_events_table *table __maybe_unused,
- void *data)
+ void *vdata)
{
- struct pmu_sys_event_iter_data *idata = data;
- struct perf_pmu *pmu = idata->pmu;
+ struct perf_pmu *pmu = vdata;
if (!pe->compat || !pe->pmu)
return 0;
if (!strcmp(pmu->id, pe->compat) &&
pmu_uncore_alias_match(pe->pmu, pmu->name)) {
- __perf_pmu__new_alias(idata->head, -1,
- (char *)pe->name,
- (char *)pe->desc,
- (char *)pe->event,
- pe);
+ perf_pmu__new_alias(pmu,
+ pe->name,
+ pe->desc,
+ pe->event,
+ /*val_fd=*/ NULL,
+ pe);
}
return 0;
}
-void pmu_add_sys_aliases(struct list_head *head, struct perf_pmu *pmu)
+void pmu_add_sys_aliases(struct perf_pmu *pmu)
{
- struct pmu_sys_event_iter_data idata = {
- .head = head,
- .pmu = pmu,
- };
-
if (!pmu->id)
return;
- pmu_for_each_sys_event(pmu_add_sys_aliases_iter_fn, &idata);
+ pmu_for_each_sys_event(pmu_add_sys_aliases_iter_fn, pmu);
}
struct perf_event_attr * __weak
@@ -840,13 +941,13 @@ perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
return NULL;
}
-char * __weak
+const char * __weak
pmu_find_real_name(const char *name)
{
- return (char *)name;
+ return name;
}
-char * __weak
+const char * __weak
pmu_find_alias_name(const char *name __maybe_unused)
{
return NULL;
@@ -863,40 +964,41 @@ static int pmu_max_precise(int dirfd, struct perf_pmu *pmu)
struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name)
{
struct perf_pmu *pmu;
- LIST_HEAD(format);
- LIST_HEAD(aliases);
__u32 type;
- char *name = pmu_find_real_name(lookup_name);
- char *alias_name;
-
- /*
- * The pmu data we store & need consists of the pmu
- * type value and format definitions. Load both right
- * now.
- */
- if (pmu_format(dirfd, name, &format))
- return NULL;
-
- /*
- * Check the aliases first to avoid unnecessary work.
- */
- if (pmu_aliases(dirfd, name, &aliases))
- return NULL;
+ const char *name = pmu_find_real_name(lookup_name);
+ const char *alias_name;
pmu = zalloc(sizeof(*pmu));
if (!pmu)
return NULL;
- pmu->is_core = is_pmu_core(name);
- pmu->cpus = pmu_cpumask(dirfd, name, pmu->is_core);
pmu->name = strdup(name);
if (!pmu->name)
goto err;
- /* Read type, and ensure that type value is successfully assigned (return 1) */
+ /*
+ * Read type early to fail fast if a lookup name isn't a PMU. Ensure
+ * that type value is successfully assigned (return 1).
+ */
if (perf_pmu__scan_file_at(pmu, dirfd, "type", "%u", &type) != 1)
goto err;
+ INIT_LIST_HEAD(&pmu->format);
+ INIT_LIST_HEAD(&pmu->aliases);
+ INIT_LIST_HEAD(&pmu->caps);
+
+ /*
+ * The pmu data we store & need consists of the pmu
+ * type value and format definitions. Load both right
+ * now.
+ */
+ if (pmu_format(pmu, dirfd, name)) {
+ free(pmu);
+ return NULL;
+ }
+ pmu->is_core = is_pmu_core(name);
+ pmu->cpus = pmu_cpumask(dirfd, name, pmu->is_core);
+
alias_name = pmu_find_alias_name(name);
if (alias_name) {
pmu->alias_name = strdup(alias_name);
@@ -909,14 +1011,8 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
if (pmu->is_uncore)
pmu->id = pmu_id(name);
pmu->max_precise = pmu_max_precise(dirfd, pmu);
- pmu_add_cpu_aliases(&aliases, pmu);
- pmu_add_sys_aliases(&aliases, pmu);
-
- INIT_LIST_HEAD(&pmu->format);
- INIT_LIST_HEAD(&pmu->aliases);
- INIT_LIST_HEAD(&pmu->caps);
- list_splice(&format, &pmu->format);
- list_splice(&aliases, &pmu->aliases);
+ pmu->events_table = perf_pmu__find_events_table(pmu);
+ pmu_add_sys_aliases(pmu);
list_add_tail(&pmu->list, pmus);
pmu->default_config = perf_pmu__get_default_config(pmu);
@@ -966,13 +1062,15 @@ void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
if (pmu == &perf_pmu__fake)
return;
- list_for_each_entry(format, &pmu->format, list)
+ list_for_each_entry(format, &pmu->format, list) {
+ perf_pmu_format__load(pmu, format);
if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END) {
pr_warning("WARNING: '%s' format '%s' requires 'perf_event_attr::config%d'"
"which is not supported by this version of perf!\n",
pmu->name, format->name, format->value);
return;
}
+ }
}
bool evsel__is_aux_event(const struct evsel *evsel)
@@ -1000,7 +1098,7 @@ void evsel__set_config_if_unset(struct perf_pmu *pmu, struct evsel *evsel,
if (term)
user_bits = term->val.cfg_chg;
- bits = perf_pmu__format_bits(&pmu->format, config_name);
+ bits = perf_pmu__format_bits(pmu, config_name);
/* Do nothing if the user changed the value */
if (bits & user_bits)
@@ -1023,9 +1121,9 @@ pmu_find_format(struct list_head *formats, const char *name)
return NULL;
}
-__u64 perf_pmu__format_bits(struct list_head *formats, const char *name)
+__u64 perf_pmu__format_bits(struct perf_pmu *pmu, const char *name)
{
- struct perf_pmu_format *format = pmu_find_format(formats, name);
+ struct perf_pmu_format *format = pmu_find_format(&pmu->format, name);
__u64 bits = 0;
int fbit;
@@ -1038,13 +1136,14 @@ __u64 perf_pmu__format_bits(struct list_head *formats, const char *name)
return bits;
}
-int perf_pmu__format_type(struct list_head *formats, const char *name)
+int perf_pmu__format_type(struct perf_pmu *pmu, const char *name)
{
- struct perf_pmu_format *format = pmu_find_format(formats, name);
+ struct perf_pmu_format *format = pmu_find_format(&pmu->format, name);
if (!format)
return -1;
+ perf_pmu_format__load(pmu, format);
return format->value;
}
@@ -1135,8 +1234,7 @@ error:
* Setup one of config[12] attr members based on the
* user input data - term parameter.
*/
-static int pmu_config_term(const char *pmu_name,
- struct list_head *formats,
+static int pmu_config_term(struct perf_pmu *pmu,
struct perf_event_attr *attr,
struct parse_events_term *term,
struct list_head *head_terms,
@@ -1160,15 +1258,15 @@ static int pmu_config_term(const char *pmu_name,
if (parse_events__is_hardcoded_term(term))
return 0;
- format = pmu_find_format(formats, term->config);
+ format = pmu_find_format(&pmu->format, term->config);
if (!format) {
- char *pmu_term = pmu_formats_string(formats);
+ char *pmu_term = pmu_formats_string(&pmu->format);
char *unknown_term;
char *help_msg;
if (asprintf(&unknown_term,
"unknown term '%s' for pmu '%s'",
- term->config, pmu_name) < 0)
+ term->config, pmu->name) < 0)
unknown_term = NULL;
help_msg = parse_events_formats_error_string(pmu_term);
if (err) {
@@ -1182,7 +1280,7 @@ static int pmu_config_term(const char *pmu_name,
free(pmu_term);
return -EINVAL;
}
-
+ perf_pmu_format__load(pmu, format);
switch (format->value) {
case PERF_PMU_FORMAT_VALUE_CONFIG:
vp = &attr->config;
@@ -1259,7 +1357,7 @@ static int pmu_config_term(const char *pmu_name,
return 0;
}
-int perf_pmu__config_terms(const char *pmu_name, struct list_head *formats,
+int perf_pmu__config_terms(struct perf_pmu *pmu,
struct perf_event_attr *attr,
struct list_head *head_terms,
bool zero, struct parse_events_error *err)
@@ -1267,8 +1365,7 @@ int perf_pmu__config_terms(const char *pmu_name, struct list_head *formats,
struct parse_events_term *term;
list_for_each_entry(term, head_terms, list) {
- if (pmu_config_term(pmu_name, formats, attr, term, head_terms,
- zero, err))
+ if (pmu_config_term(pmu, attr, term, head_terms, zero, err))
return -EINVAL;
}
@@ -1286,25 +1383,25 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
{
bool zero = !!pmu->default_config;
- return perf_pmu__config_terms(pmu->name, &pmu->format, attr,
- head_terms, zero, err);
+ return perf_pmu__config_terms(pmu, attr, head_terms, zero, err);
}
static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
struct parse_events_term *term)
{
struct perf_pmu_alias *alias;
- char *name;
+ const char *name;
if (parse_events__is_hardcoded_term(term))
return NULL;
if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
- if (term->val.num != 1)
+ if (!term->no_value)
return NULL;
if (pmu_find_format(&pmu->format, term->config))
return NULL;
name = term->config;
+
} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
if (strcasecmp(term->config, "event"))
return NULL;
@@ -1313,26 +1410,51 @@ static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
return NULL;
}
- list_for_each_entry(alias, &pmu->aliases, list) {
- if (!strcasecmp(alias->name, name))
- return alias;
+ alias = perf_pmu__find_alias(pmu, name, /*load=*/ true);
+ if (alias || pmu->cpu_aliases_added)
+ return alias;
+
+ /* Alias doesn't exist, try to get it from the json events. */
+ if (pmu->events_table &&
+ pmu_events_table__find_event(pmu->events_table, pmu, name,
+ pmu_add_cpu_aliases_map_callback,
+ pmu) == 0) {
+ alias = perf_pmu__find_alias(pmu, name, /*load=*/ false);
}
- return NULL;
+ return alias;
}
-static int check_info_data(struct perf_pmu_alias *alias,
- struct perf_pmu_info *info)
+static int check_info_data(struct perf_pmu *pmu,
+ struct perf_pmu_alias *alias,
+ struct perf_pmu_info *info,
+ struct parse_events_error *err,
+ int column)
{
+ read_alias_info(pmu, alias);
/*
* Only one term in event definition can
* define unit, scale and snapshot, fail
* if there's more than one.
*/
- if ((info->unit && alias->unit[0]) ||
- (info->scale && alias->scale) ||
- (info->snapshot && alias->snapshot))
+ if (info->unit && alias->unit[0]) {
+ parse_events_error__handle(err, column,
+ strdup("Attempt to set event's unit twice"),
+ NULL);
return -EINVAL;
+ }
+ if (info->scale && alias->scale) {
+ parse_events_error__handle(err, column,
+ strdup("Attempt to set event's scale twice"),
+ NULL);
+ return -EINVAL;
+ }
+ if (info->snapshot && alias->snapshot) {
+ parse_events_error__handle(err, column,
+ strdup("Attempt to set event snapshot twice"),
+ NULL);
+ return -EINVAL;
+ }
if (alias->unit[0])
info->unit = alias->unit;
@@ -1351,7 +1473,7 @@ static int check_info_data(struct perf_pmu_alias *alias,
* defined for the alias
*/
int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
- struct perf_pmu_info *info)
+ struct perf_pmu_info *info, struct parse_events_error *err)
{
struct parse_events_term *term, *h;
struct perf_pmu_alias *alias;
@@ -1372,10 +1494,14 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
if (!alias)
continue;
ret = pmu_alias_terms(alias, &term->list);
- if (ret)
+ if (ret) {
+ parse_events_error__handle(err, term->err_term,
+ strdup("Failure to duplicate terms"),
+ NULL);
return ret;
+ }
- ret = check_info_data(alias, info);
+ ret = check_info_data(pmu, alias, info, err, term->err_term);
if (ret)
return ret;
@@ -1400,36 +1526,36 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
return 0;
}
-int perf_pmu__new_format(struct list_head *list, char *name,
- int config, unsigned long *bits)
-{
- struct perf_pmu_format *format;
+struct find_event_args {
+ const char *event;
+ void *state;
+ pmu_event_callback cb;
+};
- format = zalloc(sizeof(*format));
- if (!format)
- return -ENOMEM;
+static int find_event_callback(void *state, struct pmu_event_info *info)
+{
+ struct find_event_args *args = state;
- format->name = strdup(name);
- format->value = config;
- memcpy(format->bits, bits, sizeof(format->bits));
+ if (!strcmp(args->event, info->name))
+ return args->cb(args->state, info);
- list_add_tail(&format->list, list);
return 0;
}
-void perf_pmu__set_format(unsigned long *bits, long from, long to)
+int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb)
{
- long b;
-
- if (!to)
- to = from;
+ struct find_event_args args = {
+ .event = event,
+ .state = state,
+ .cb = cb,
+ };
- memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));
- for (b = from; b <= to; b++)
- __set_bit(b, bits);
+ /* Sub-optimal, but this function is only used by tests. */
+ return perf_pmu__for_each_event(pmu, /*skip_duplicate_pmus=*/ false,
+ &args, find_event_callback);
}
-void perf_pmu__del_formats(struct list_head *formats)
+static void perf_pmu__del_formats(struct list_head *formats)
{
struct perf_pmu_format *fmt, *tmp;
@@ -1466,15 +1592,145 @@ bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu)
return !pmu->is_core || perf_pmus__num_core_pmus() == 1;
}
-bool perf_pmu__have_event(const struct perf_pmu *pmu, const char *name)
+bool perf_pmu__have_event(struct perf_pmu *pmu, const char *name)
{
- struct perf_pmu_alias *alias;
+ if (perf_pmu__find_alias(pmu, name, /*load=*/ true) != NULL)
+ return true;
+ if (pmu->cpu_aliases_added || !pmu->events_table)
+ return false;
+ return pmu_events_table__find_event(pmu->events_table, pmu, name, NULL, NULL) == 0;
+}
- list_for_each_entry(alias, &pmu->aliases, list) {
- if (!strcmp(alias->name, name))
- return true;
+size_t perf_pmu__num_events(struct perf_pmu *pmu)
+{
+ size_t nr;
+
+ if (!pmu->sysfs_aliases_loaded)
+ pmu_aliases_parse(pmu);
+
+ nr = pmu->sysfs_aliases;
+
+ if (pmu->cpu_aliases_added)
+ nr += pmu->loaded_json_aliases;
+ else if (pmu->events_table)
+ nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->loaded_json_aliases;
+
+ return pmu->selectable ? nr + 1 : nr;
+}
+
+static int sub_non_neg(int a, int b)
+{
+ if (b > a)
+ return 0;
+ return a - b;
+}
+
+static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
+ const struct perf_pmu_alias *alias, bool skip_duplicate_pmus)
+{
+ struct parse_events_term *term;
+ int pmu_name_len = skip_duplicate_pmus
+ ? pmu_name_len_no_suffix(pmu->name, /*num=*/NULL)
+ : (int)strlen(pmu->name);
+ int used = snprintf(buf, len, "%.*s/%s", pmu_name_len, pmu->name, alias->name);
+
+ list_for_each_entry(term, &alias->terms, list) {
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
+ used += snprintf(buf + used, sub_non_neg(len, used),
+ ",%s=%s", term->config,
+ term->val.str);
}
- return false;
+
+ if (sub_non_neg(len, used) > 0) {
+ buf[used] = '/';
+ used++;
+ }
+ if (sub_non_neg(len, used) > 0) {
+ buf[used] = '\0';
+ used++;
+ } else
+ buf[len - 1] = '\0';
+
+ return buf;
+}
+
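For reference, here is a minimal standalone sketch of the "<pmu>/<alias>[,term=value...]/" string that format_alias() assembles. The PMU, event and term names below are hypothetical, and the real code additionally trims the "_<num>" suffix from the PMU name when skip_duplicate_pmus is set:

#include <stdio.h>

int main(void)
{
        char buf[256];
        /* "<pmu>/<alias>" first, then each string-valued term as ",name=value". */
        int used = snprintf(buf, sizeof(buf), "%s/%s", "uncore_imc_0", "cas_count_read");

        used += snprintf(buf + used, sizeof(buf) - used, ",%s=%s", "event", "0x04");
        snprintf(buf + used, sizeof(buf) - used, "/");
        printf("%s\n", buf); /* uncore_imc_0/cas_count_read,event=0x04/ */
        return 0;
}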
+int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
+ void *state, pmu_event_callback cb)
+{
+ char buf[1024];
+ struct perf_pmu_alias *event;
+ struct pmu_event_info info = {
+ .pmu = pmu,
+ };
+ int ret = 0;
+ struct strbuf sb;
+
+ strbuf_init(&sb, /*hint=*/ 0);
+ pmu_add_cpu_aliases(pmu);
+ list_for_each_entry(event, &pmu->aliases, list) {
+ size_t buf_used;
+
+ info.pmu_name = event->pmu_name ?: pmu->name;
+ info.alias = NULL;
+ if (event->desc) {
+ info.name = event->name;
+ buf_used = 0;
+ } else {
+ info.name = format_alias(buf, sizeof(buf), pmu, event,
+ skip_duplicate_pmus);
+ if (pmu->is_core) {
+ info.alias = info.name;
+ info.name = event->name;
+ }
+ buf_used = strlen(buf) + 1;
+ }
+ info.scale_unit = NULL;
+ if (strlen(event->unit) || event->scale != 1.0) {
+ info.scale_unit = buf + buf_used;
+ buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
+ "%G%s", event->scale, event->unit) + 1;
+ }
+ info.desc = event->desc;
+ info.long_desc = event->long_desc;
+ info.encoding_desc = buf + buf_used;
+ parse_events_term__to_strbuf(&event->terms, &sb);
+ buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
+ "%s/%s/", info.pmu_name, sb.buf) + 1;
+ info.topic = event->topic;
+ info.str = sb.buf;
+ info.deprecated = event->deprecated;
+ ret = cb(state, &info);
+ if (ret)
+ goto out;
+ strbuf_setlen(&sb, /*len=*/ 0);
+ }
+ if (pmu->selectable) {
+ info.name = buf;
+ snprintf(buf, sizeof(buf), "%s//", pmu->name);
+ info.alias = NULL;
+ info.scale_unit = NULL;
+ info.desc = NULL;
+ info.long_desc = NULL;
+ info.encoding_desc = NULL;
+ info.topic = NULL;
+ info.pmu_name = pmu->name;
+ info.deprecated = false;
+ ret = cb(state, &info);
+ }
+out:
+ strbuf_release(&sb);
+ return ret;
+}
+
+bool pmu__name_match(const struct perf_pmu *pmu, const char *pmu_name)
+{
+ return !strcmp(pmu->name, pmu_name) ||
+ (pmu->is_uncore && pmu_uncore_alias_match(pmu_name, pmu->name)) ||
+ /*
+ * jevents and tests use default_core as a marker for any core
+ * PMU as the PMU name varies across architectures.
+ */
+ (pmu->is_core && !strcmp(pmu_name, "default_core"));
}
bool perf_pmu__is_software(const struct perf_pmu *pmu)
@@ -1710,7 +1966,7 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
name ?: "N/A", buf, config_name, config);
}
-int perf_pmu__match(char *pattern, char *name, char *tok)
+int perf_pmu__match(const char *pattern, const char *name, const char *tok)
{
if (!name)
return -1;
@@ -1756,17 +2012,19 @@ int perf_pmu__event_source_devices_fd(void)
* then pathname will be filled with
* "/sys/bus/event_source/devices/cs_etm/format"
*
- * Return 0 if the sysfs mountpoint couldn't be found or if no
- * characters were written.
+ * Return 0 if the sysfs mountpoint couldn't be found, if no characters were
+ * written or if the buffer size is exceeded.
*/
int perf_pmu__pathname_scnprintf(char *buf, size_t size,
const char *pmu_name, const char *filename)
{
- char base_path[PATH_MAX];
+ size_t len;
- if (!perf_pmu__event_source_devices_scnprintf(base_path, sizeof(base_path)))
+ len = perf_pmu__event_source_devices_scnprintf(buf, size);
+ if (!len || (len + strlen(pmu_name) + strlen(filename) + 1) >= size)
return 0;
- return scnprintf(buf, size, "%s%s/%s", base_path, pmu_name, filename);
+
+ return scnprintf(buf + len, size - len, "%s/%s", pmu_name, filename);
}
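A rough standalone sketch of the reworked construction and its truncation check; the sysfs prefix is hard-coded here instead of coming from perf_pmu__event_source_devices_scnprintf(), and plain snprintf stands in for scnprintf:

#include <stdio.h>
#include <string.h>

/* Returns 0 when the full path would not fit, mirroring the helper above. */
static int pmu_path(char *buf, size_t size, const char *pmu, const char *file)
{
        size_t len = snprintf(buf, size, "%s", "/sys/bus/event_source/devices/");

        if (!len || len + strlen(pmu) + strlen(file) + 1 >= size)
                return 0;
        return snprintf(buf + len, size - len, "%s/%s", pmu, file);
}

int main(void)
{
        char buf[128];

        if (pmu_path(buf, sizeof(buf), "cs_etm", "format"))
                printf("%s\n", buf); /* /sys/bus/event_source/devices/cs_etm/format */
        return 0;
}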
int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename, int flags)
@@ -1788,5 +2046,23 @@ void perf_pmu__delete(struct perf_pmu *pmu)
zfree(&pmu->default_config);
zfree(&pmu->name);
zfree(&pmu->alias_name);
+ zfree(&pmu->id);
free(pmu);
}
+
+struct perf_pmu *pmu__find_core_pmu(void)
+{
+ struct perf_pmu *pmu = NULL;
+
+ while ((pmu = perf_pmus__scan_core(pmu))) {
+ /*
+ * The cpumap should cover all CPUs. Otherwise, some CPUs may
+ * not support some events or have different event IDs.
+ */
+ if (RC_CHK_ACCESS(pmu->cpus)->nr != cpu__max_cpu().cpu)
+ return NULL;
+
+ return pmu;
+ }
+ return NULL;
+}
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 6b414cecbad2..6a4e170c61d6 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -39,7 +39,7 @@ struct perf_pmu_caps {
*/
struct perf_pmu {
/** @name: The name of the PMU such as "cpu". */
- char *name;
+ const char *name;
/**
* @alias_name: Optional alternate name for the PMU determined in
* architecture specific code.
@@ -49,7 +49,7 @@ struct perf_pmu {
* @id: Optional PMU identifier read from
* <sysfs>/bus/event_source/devices/<name>/identifier.
*/
- char *id;
+ const char *id;
/**
* @type: Perf event attributed type value, read from
* <sysfs>/bus/event_source/devices/<name>/type.
@@ -114,6 +114,21 @@ struct perf_pmu {
* from json events in pmu-events.c.
*/
struct list_head aliases;
+ /**
+ * @events_table: The events table for json events in pmu-events.c.
+ */
+ const struct pmu_events_table *events_table;
+ /** @sysfs_aliases: Number of sysfs aliases loaded. */
+ uint32_t sysfs_aliases;
+ /** @loaded_json_aliases: Number of json event aliases loaded. */
+ uint32_t loaded_json_aliases;
+ /** @sysfs_aliases_loaded: Are sysfs aliases loaded from disk? */
+ bool sysfs_aliases_loaded;
+ /**
+ * @cpu_aliases_added: Have all json events table entries for the PMU
+ * been added?
+ */
+ bool cpu_aliases_added;
/** @caps_initialized: Has the list caps been initialized? */
bool caps_initialized;
/** @nr_caps: The length of the list caps. */
@@ -158,88 +173,49 @@ struct perf_pmu_info {
bool snapshot;
};
-#define UNIT_MAX_LEN 31 /* max length for event unit name */
-
-/**
- * struct perf_pmu_alias - An event either read from sysfs or builtin in
- * pmu-events.c, created by parsing the pmu-events json files.
- */
-struct perf_pmu_alias {
- /** @name: Name of the event like "mem-loads". */
- char *name;
- /** @desc: Optional short description of the event. */
- char *desc;
- /** @long_desc: Optional long description. */
- char *long_desc;
- /**
- * @topic: Optional topic such as cache or pipeline, particularly for
- * json events.
- */
- char *topic;
- /**
- * @str: Comma separated parameter list like
- * "event=0xcd,umask=0x1,ldlat=0x3".
- */
- char *str;
- /** @terms: Owned list of the original parsed parameters. */
- struct list_head terms;
- /** @list: List element of struct perf_pmu aliases. */
- struct list_head list;
- /** @unit: Units for the event, such as bytes or cache lines. */
- char unit[UNIT_MAX_LEN+1];
- /** @scale: Value to scale read counter values by. */
- double scale;
- /**
- * @per_pkg: Does the file
- * <sysfs>/bus/event_source/devices/<pmu_name>/events/<name>.per-pkg or
- * equivalent json value exist and have the value 1.
- */
- bool per_pkg;
- /**
- * @snapshot: Does the file
- * <sysfs>/bus/event_source/devices/<pmu_name>/events/<name>.snapshot
- * exist and have the value 1.
- */
- bool snapshot;
- /**
- * @deprecated: Is the event hidden and so not shown in perf list by
- * default.
- */
+struct pmu_event_info {
+ const struct perf_pmu *pmu;
+ const char *name;
+ const char *alias;
+ const char *scale_unit;
+ const char *desc;
+ const char *long_desc;
+ const char *encoding_desc;
+ const char *topic;
+ const char *pmu_name;
+ const char *str;
bool deprecated;
- /**
- * @pmu_name: The name copied from the json struct pmu_event. This can
- * differ from the PMU name as it won't have suffixes.
- */
- char *pmu_name;
};
-void pmu_add_sys_aliases(struct list_head *head, struct perf_pmu *pmu);
+typedef int (*pmu_event_callback)(void *state, struct pmu_event_info *info);
+
+void pmu_add_sys_aliases(struct perf_pmu *pmu);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms,
struct parse_events_error *error);
-int perf_pmu__config_terms(const char *pmu_name, struct list_head *formats,
+int perf_pmu__config_terms(struct perf_pmu *pmu,
struct perf_event_attr *attr,
struct list_head *head_terms,
bool zero, struct parse_events_error *error);
-__u64 perf_pmu__format_bits(struct list_head *formats, const char *name);
-int perf_pmu__format_type(struct list_head *formats, const char *name);
+__u64 perf_pmu__format_bits(struct perf_pmu *pmu, const char *name);
+int perf_pmu__format_type(struct perf_pmu *pmu, const char *name);
int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
- struct perf_pmu_info *info);
-struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
- struct list_head *head_terms);
-void perf_pmu_error(struct list_head *list, char *name, void *scanner, char const *msg);
+ struct perf_pmu_info *info, struct parse_events_error *err);
+int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb);
-int perf_pmu__new_format(struct list_head *list, char *name,
- int config, unsigned long *bits);
-void perf_pmu__set_format(unsigned long *bits, long from, long to);
-int perf_pmu__format_parse(int dirfd, struct list_head *head);
-void perf_pmu__del_formats(struct list_head *formats);
+int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load);
+void perf_pmu_format__set_value(void *format, int config, unsigned long *bits);
bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name);
bool is_pmu_core(const char *name);
bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu);
bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu);
-bool perf_pmu__have_event(const struct perf_pmu *pmu, const char *name);
+bool perf_pmu__have_event(struct perf_pmu *pmu, const char *name);
+size_t perf_pmu__num_events(struct perf_pmu *pmu);
+int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
+ void *state, pmu_event_callback cb);
+bool pmu__name_match(const struct perf_pmu *pmu, const char *pmu_name);
+
/**
* perf_pmu_is_software - is the PMU a software PMU as in it uses the
* perf_sw_context in the kernel?
@@ -258,13 +234,12 @@ bool perf_pmu__file_exists(struct perf_pmu *pmu, const char *name);
int perf_pmu__test(void);
struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
-void pmu_add_cpu_aliases_table(struct list_head *head, struct perf_pmu *pmu,
+void pmu_add_cpu_aliases_table(struct perf_pmu *pmu,
const struct pmu_events_table *table);
char *perf_pmu__getcpuid(struct perf_pmu *pmu);
const struct pmu_events_table *pmu_events_table__find(void);
const struct pmu_metrics_table *pmu_metrics_table__find(void);
-void perf_pmu_free_alias(struct perf_pmu_alias *alias);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
@@ -275,10 +250,10 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
const char *config_name);
void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
-int perf_pmu__match(char *pattern, char *name, char *tok);
+int perf_pmu__match(const char *pattern, const char *name, const char *tok);
-char *pmu_find_real_name(const char *name);
-char *pmu_find_alias_name(const char *name);
+const char *pmu_find_real_name(const char *name);
+const char *pmu_find_alias_name(const char *name);
double perf_pmu__cpu_slots_per_cycle(void);
int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size);
int perf_pmu__pathname_scnprintf(char *buf, size_t size,
@@ -289,5 +264,6 @@ int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename,
struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name);
struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus);
void perf_pmu__delete(struct perf_pmu *pmu);
+struct perf_pmu *pmu__find_core_pmu(void);
#endif /* __PMU_H */
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
index dff4e892ac4d..600c8c158c8e 100644
--- a/tools/perf/util/pmu.y
+++ b/tools/perf/util/pmu.y
@@ -1,6 +1,5 @@
%define api.pure full
-%parse-param {struct list_head *format}
-%parse-param {char *name}
+%parse-param {void *format}
%parse-param {void *scanner}
%lex-param {void* scanner}
@@ -11,6 +10,9 @@
#include <linux/bitmap.h>
#include <string.h>
#include "pmu.h"
+#include "pmu-bison.h"
+
+int perf_pmu_lex(YYSTYPE * yylval_param , void *yyscanner);
#define ABORT_ON(val) \
do { \
@@ -18,6 +20,20 @@ do { \
YYABORT; \
} while (0)
+static void perf_pmu_error(void *format, void *scanner, const char *msg);
+
+static void perf_pmu__set_format(unsigned long *bits, long from, long to)
+{
+ long b;
+
+ if (!to)
+ to = from;
+
+ memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));
+ for (b = from; b <= to; b++)
+ __set_bit(b, bits);
+}
+
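The parsed "config:<from>-<to>" range ends up as a contiguous run of set bits. Below is a standalone sketch of the same idea using a single 64-bit word; the real code uses a multi-word bitmap because format fields can span up to PERF_PMU_FORMAT_BITS bits:

#include <stdint.h>
#include <stdio.h>

/* Set bits [from, to] of a mask; a bare "config:<from>" means a single bit. */
static uint64_t format_mask(unsigned int from, unsigned int to)
{
        uint64_t bits = 0;

        if (!to)
                to = from;
        for (unsigned int b = from; b <= to; b++)
                bits |= 1ULL << b;
        return bits;
}

int main(void)
{
        /* A sysfs format entry "config:0-7" becomes the mask 0xff. */
        printf("%#llx\n", (unsigned long long)format_mask(0, 7));
        return 0;
}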
%}
%token PP_CONFIG
@@ -42,16 +58,12 @@ format_term
format_term:
PP_CONFIG ':' bits
{
- ABORT_ON(perf_pmu__new_format(format, name,
- PERF_PMU_FORMAT_VALUE_CONFIG,
- $3));
+ perf_pmu_format__set_value(format, PERF_PMU_FORMAT_VALUE_CONFIG, $3);
}
|
PP_CONFIG PP_VALUE ':' bits
{
- ABORT_ON(perf_pmu__new_format(format, name,
- $2,
- $4));
+ perf_pmu_format__set_value(format, $2, $4);
}
bits:
@@ -78,9 +90,8 @@ PP_VALUE
%%
-void perf_pmu_error(struct list_head *list __maybe_unused,
- char *name __maybe_unused,
- void *scanner __maybe_unused,
- char const *msg __maybe_unused)
+static void perf_pmu_error(void *format __maybe_unused,
+ void *scanner __maybe_unused,
+ const char *msg __maybe_unused)
{
}
diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
index c58ba9fb6a36..6631367c756f 100644
--- a/tools/perf/util/pmus.c
+++ b/tools/perf/util/pmus.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <sys/types.h>
+#include <ctype.h>
#include <dirent.h>
#include <pthread.h>
#include <string.h>
@@ -33,6 +36,31 @@ static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;
+int pmu_name_len_no_suffix(const char *str, unsigned long *num)
+{
+ int orig_len, len;
+
+ orig_len = len = strlen(str);
+
+ /* Non-uncore PMUs have their full length, for example, i915. */
+ if (!strstarts(str, "uncore_"))
+ return len;
+
+ /*
+ * Count trailing digits and '_'; if a '_{num}' suffix isn't present, use
+ * the full length.
+ */
+ while (len > 0 && isdigit(str[len - 1]))
+ len--;
+
+ if (len > 0 && len != orig_len && str[len - 1] == '_') {
+ if (num)
+ *num = strtoul(&str[len], NULL, 10);
+ return len - 1;
+ }
+ return orig_len;
+}
+
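A quick standalone illustration of the suffix-stripping rule; the PMU names are hypothetical and the numeric suffix extraction through *num is omitted for brevity:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Only "uncore_*_<num>" names lose their "_<num>" suffix. */
static int name_len_no_suffix(const char *str)
{
        int orig_len = strlen(str), len = orig_len;

        if (strncmp(str, "uncore_", 7))
                return len;
        while (len > 0 && isdigit((unsigned char)str[len - 1]))
                len--;
        if (len > 0 && len != orig_len && str[len - 1] == '_')
                return len - 1;
        return orig_len;
}

int main(void)
{
        printf("%.*s\n", name_len_no_suffix("uncore_imc_10"), "uncore_imc_10"); /* uncore_imc */
        printf("%.*s\n", name_len_no_suffix("i915"), "i915"); /* i915 */
        return 0;
}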
void perf_pmus__destroy(void)
{
struct perf_pmu *pmu, *tmp;
@@ -122,6 +150,25 @@ static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
}
+static int pmus_cmp(void *priv __maybe_unused,
+ const struct list_head *lhs, const struct list_head *rhs)
+{
+ unsigned long lhs_num = 0, rhs_num = 0;
+ struct perf_pmu *lhs_pmu = container_of(lhs, struct perf_pmu, list);
+ struct perf_pmu *rhs_pmu = container_of(rhs, struct perf_pmu, list);
+ const char *lhs_pmu_name = lhs_pmu->name ?: "";
+ const char *rhs_pmu_name = rhs_pmu->name ?: "";
+ int lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name, &lhs_num);
+ int rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name, &rhs_num);
+ int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
+ lhs_pmu_name_len < rhs_pmu_name_len ? lhs_pmu_name_len : rhs_pmu_name_len);
+
+ if (lhs_pmu_name_len != rhs_pmu_name_len || ret != 0 || lhs_pmu_name_len == 0)
+ return ret;
+
+ return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
+}
+
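The practical effect of this comparator is a natural sort, so uncore_imc_2 orders before uncore_imc_10 rather than after it. A hedged standalone sketch, assuming every name carries a "_<number>" suffix (the real code also handles plain names, via pmu_name_len_no_suffix):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Compare "<base>_<number>" names: base lexically, then suffix numerically. */
static int cmp_pmu_name(const void *a, const void *b)
{
        const char *l = *(const char * const *)a;
        const char *r = *(const char * const *)b;
        const char *lu = strrchr(l, '_'), *ru = strrchr(r, '_');
        size_t llen = lu - l, rlen = ru - r;
        int ret = strncmp(l, r, llen < rlen ? llen : rlen);
        unsigned long ln, rn;

        if (ret || llen != rlen)
                return ret ? ret : (llen < rlen ? -1 : 1);
        ln = strtoul(lu + 1, NULL, 10);
        rn = strtoul(ru + 1, NULL, 10);
        return ln < rn ? -1 : ln > rn;
}

int main(void)
{
        const char *names[] = { "uncore_imc_10", "uncore_imc_2", "uncore_imc_1" };

        qsort(names, 3, sizeof(names[0]), cmp_pmu_name);
        for (int i = 0; i < 3; i++)
                printf("%s\n", names[i]); /* uncore_imc_1, uncore_imc_2, uncore_imc_10 */
        return 0;
}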
/* Add all pmus in sysfs to pmu list: */
static void pmu_read_sysfs(bool core_only)
{
@@ -156,6 +203,8 @@ static void pmu_read_sysfs(bool core_only)
if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
pr_err("Failure to set up any core PMUs\n");
}
+ list_sort(NULL, &core_pmus, pmus_cmp);
+ list_sort(NULL, &other_pmus, pmus_cmp);
if (!list_empty(&core_pmus)) {
read_sysfs_core_pmus = true;
if (!core_only)
@@ -227,6 +276,43 @@ struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
return NULL;
}
+static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
+{
+ bool use_core_pmus = !pmu || pmu->is_core;
+ int last_pmu_name_len = 0;
+ const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";
+
+ if (!pmu) {
+ pmu_read_sysfs(/*core_only=*/false);
+ pmu = list_prepare_entry(pmu, &core_pmus, list);
+ } else
+ last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", NULL);
+
+ if (use_core_pmus) {
+ list_for_each_entry_continue(pmu, &core_pmus, list) {
+ int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL);
+
+ if (last_pmu_name_len == pmu_name_len &&
+ !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
+ continue;
+
+ return pmu;
+ }
+ pmu = NULL;
+ pmu = list_prepare_entry(pmu, &other_pmus, list);
+ }
+ list_for_each_entry_continue(pmu, &other_pmus, list) {
+ int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL);
+
+ if (last_pmu_name_len == pmu_name_len &&
+ !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
+ continue;
+
+ return pmu;
+ }
+ return NULL;
+}
+
const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
{
struct perf_pmu *pmu = NULL;
@@ -258,219 +344,153 @@ int __weak perf_pmus__num_mem_pmus(void)
struct sevent {
/** PMU for event. */
const struct perf_pmu *pmu;
- /**
- * Optional event for name, desc, etc. If not present then this is a
- * selectable PMU and the event name is shown as "//".
- */
- const struct perf_pmu_alias *event;
- /** Is the PMU for the CPU? */
- bool is_cpu;
+ const char *name;
+ const char *alias;
+ const char *scale_unit;
+ const char *desc;
+ const char *long_desc;
+ const char *encoding_desc;
+ const char *topic;
+ const char *pmu_name;
+ bool deprecated;
};
static int cmp_sevent(const void *a, const void *b)
{
const struct sevent *as = a;
const struct sevent *bs = b;
- const char *a_pmu_name = NULL, *b_pmu_name = NULL;
- const char *a_name = "//", *a_desc = NULL, *a_topic = "";
- const char *b_name = "//", *b_desc = NULL, *b_topic = "";
+ bool a_iscpu, b_iscpu;
int ret;
- if (as->event) {
- a_name = as->event->name;
- a_desc = as->event->desc;
- a_topic = as->event->topic ?: "";
- a_pmu_name = as->event->pmu_name;
- }
- if (bs->event) {
- b_name = bs->event->name;
- b_desc = bs->event->desc;
- b_topic = bs->event->topic ?: "";
- b_pmu_name = bs->event->pmu_name;
- }
/* Put extra events last. */
- if (!!a_desc != !!b_desc)
- return !!a_desc - !!b_desc;
+ if (!!as->desc != !!bs->desc)
+ return !!as->desc - !!bs->desc;
/* Order by topics. */
- ret = strcmp(a_topic, b_topic);
+ ret = strcmp(as->topic ?: "", bs->topic ?: "");
if (ret)
return ret;
/* Order CPU core events to be first */
- if (as->is_cpu != bs->is_cpu)
- return as->is_cpu ? -1 : 1;
+ a_iscpu = as->pmu ? as->pmu->is_core : true;
+ b_iscpu = bs->pmu ? bs->pmu->is_core : true;
+ if (a_iscpu != b_iscpu)
+ return a_iscpu ? -1 : 1;
/* Order by PMU name. */
if (as->pmu != bs->pmu) {
- a_pmu_name = a_pmu_name ?: (as->pmu->name ?: "");
- b_pmu_name = b_pmu_name ?: (bs->pmu->name ?: "");
- ret = strcmp(a_pmu_name, b_pmu_name);
+ ret = strcmp(as->pmu_name ?: "", bs->pmu_name ?: "");
if (ret)
return ret;
}
/* Order by event name. */
- return strcmp(a_name, b_name);
+ return strcmp(as->name, bs->name);
}
-static bool pmu_alias_is_duplicate(struct sevent *alias_a,
- struct sevent *alias_b)
+static bool pmu_alias_is_duplicate(struct sevent *a, struct sevent *b)
{
- const char *a_pmu_name = NULL, *b_pmu_name = NULL;
- const char *a_name = "//", *b_name = "//";
-
-
- if (alias_a->event) {
- a_name = alias_a->event->name;
- a_pmu_name = alias_a->event->pmu_name;
- }
- if (alias_b->event) {
- b_name = alias_b->event->name;
- b_pmu_name = alias_b->event->pmu_name;
- }
-
/* Different names -> never duplicates */
- if (strcmp(a_name, b_name))
+ if (strcmp(a->name ?: "//", b->name ?: "//"))
return false;
/* Don't remove duplicates for different PMUs */
- a_pmu_name = a_pmu_name ?: (alias_a->pmu->name ?: "");
- b_pmu_name = b_pmu_name ?: (alias_b->pmu->name ?: "");
- return strcmp(a_pmu_name, b_pmu_name) == 0;
+ return strcmp(a->pmu_name, b->pmu_name) == 0;
}
-static int sub_non_neg(int a, int b)
-{
- if (b > a)
- return 0;
- return a - b;
-}
+struct events_callback_state {
+ struct sevent *aliases;
+ size_t aliases_len;
+ size_t index;
+};
-static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
- const struct perf_pmu_alias *alias)
+static int perf_pmus__print_pmu_events__callback(void *vstate,
+ struct pmu_event_info *info)
{
- struct parse_events_term *term;
- int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);
-
- list_for_each_entry(term, &alias->terms, list) {
- if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
- used += snprintf(buf + used, sub_non_neg(len, used),
- ",%s=%s", term->config,
- term->val.str);
- }
+ struct events_callback_state *state = vstate;
+ struct sevent *s;
- if (sub_non_neg(len, used) > 0) {
- buf[used] = '/';
- used++;
+ if (state->index >= state->aliases_len) {
+ pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name);
+ return 1;
}
- if (sub_non_neg(len, used) > 0) {
- buf[used] = '\0';
- used++;
- } else
- buf[len - 1] = '\0';
-
- return buf;
+ s = &state->aliases[state->index];
+ s->pmu = info->pmu;
+#define COPY_STR(str) s->str = info->str ? strdup(info->str) : NULL
+ COPY_STR(name);
+ COPY_STR(alias);
+ COPY_STR(scale_unit);
+ COPY_STR(desc);
+ COPY_STR(long_desc);
+ COPY_STR(encoding_desc);
+ COPY_STR(topic);
+ COPY_STR(pmu_name);
+#undef COPY_STR
+ s->deprecated = info->deprecated;
+ state->index++;
+ return 0;
}
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
struct perf_pmu *pmu;
- struct perf_pmu_alias *event;
- char buf[1024];
int printed = 0;
- int len, j;
+ int len;
struct sevent *aliases;
+ struct events_callback_state state;
+ bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
+ struct perf_pmu *(*scan_fn)(struct perf_pmu *);
+
+ if (skip_duplicate_pmus)
+ scan_fn = perf_pmus__scan_skip_duplicates;
+ else
+ scan_fn = perf_pmus__scan;
pmu = NULL;
len = 0;
- while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- list_for_each_entry(event, &pmu->aliases, list)
- len++;
- if (pmu->selectable)
- len++;
- }
+ while ((pmu = scan_fn(pmu)) != NULL)
+ len += perf_pmu__num_events(pmu);
+
aliases = zalloc(sizeof(struct sevent) * len);
if (!aliases) {
pr_err("FATAL: not enough memory to print PMU events\n");
return;
}
pmu = NULL;
- j = 0;
- while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- bool is_cpu = pmu->is_core;
-
- list_for_each_entry(event, &pmu->aliases, list) {
- aliases[j].event = event;
- aliases[j].pmu = pmu;
- aliases[j].is_cpu = is_cpu;
- j++;
- }
- if (pmu->selectable) {
- aliases[j].event = NULL;
- aliases[j].pmu = pmu;
- aliases[j].is_cpu = is_cpu;
- j++;
- }
+ state = (struct events_callback_state) {
+ .aliases = aliases,
+ .aliases_len = len,
+ .index = 0,
+ };
+ while ((pmu = scan_fn(pmu)) != NULL) {
+ perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state,
+ perf_pmus__print_pmu_events__callback);
}
- len = j;
qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
- for (j = 0; j < len; j++) {
- const char *name, *alias = NULL, *scale_unit = NULL,
- *desc = NULL, *long_desc = NULL,
- *encoding_desc = NULL, *topic = NULL,
- *pmu_name = NULL;
- bool deprecated = false;
- size_t buf_used;
-
+ for (int j = 0; j < len; j++) {
/* Skip duplicates */
if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
continue;
- if (!aliases[j].event) {
- /* A selectable event. */
- pmu_name = aliases[j].pmu->name;
- buf_used = snprintf(buf, sizeof(buf), "%s//", pmu_name) + 1;
- name = buf;
- } else {
- if (aliases[j].event->desc) {
- name = aliases[j].event->name;
- buf_used = 0;
- } else {
- name = format_alias(buf, sizeof(buf), aliases[j].pmu,
- aliases[j].event);
- if (aliases[j].is_cpu) {
- alias = name;
- name = aliases[j].event->name;
- }
- buf_used = strlen(buf) + 1;
- }
- pmu_name = aliases[j].event->pmu_name ?: (aliases[j].pmu->name ?: "");
- if (strlen(aliases[j].event->unit) || aliases[j].event->scale != 1.0) {
- scale_unit = buf + buf_used;
- buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
- "%G%s", aliases[j].event->scale,
- aliases[j].event->unit) + 1;
- }
- desc = aliases[j].event->desc;
- long_desc = aliases[j].event->long_desc;
- topic = aliases[j].event->topic;
- encoding_desc = buf + buf_used;
- buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
- "%s/%s/", pmu_name, aliases[j].event->str) + 1;
- deprecated = aliases[j].event->deprecated;
- }
print_cb->print_event(print_state,
- pmu_name,
- topic,
- name,
- alias,
- scale_unit,
- deprecated,
+ aliases[j].pmu_name,
+ aliases[j].topic,
+ aliases[j].name,
+ aliases[j].alias,
+ aliases[j].scale_unit,
+ aliases[j].deprecated,
"Kernel PMU event",
- desc,
- long_desc,
- encoding_desc);
+ aliases[j].desc,
+ aliases[j].long_desc,
+ aliases[j].encoding_desc);
+ zfree(&aliases[j].name);
+ zfree(&aliases[j].alias);
+ zfree(&aliases[j].scale_unit);
+ zfree(&aliases[j].desc);
+ zfree(&aliases[j].long_desc);
+ zfree(&aliases[j].encoding_desc);
+ zfree(&aliases[j].topic);
+ zfree(&aliases[j].pmu_name);
}
if (printed && pager_in_use())
printf("\n");
diff --git a/tools/perf/util/pmus.h b/tools/perf/util/pmus.h
index a21464432d0f..4c67153ac257 100644
--- a/tools/perf/util/pmus.h
+++ b/tools/perf/util/pmus.h
@@ -5,6 +5,8 @@
struct perf_pmu;
struct print_callbacks;
+int pmu_name_len_no_suffix(const char *str, unsigned long *num);
+
void perf_pmus__destroy(void);
struct perf_pmu *perf_pmus__find(const char *name);
diff --git a/tools/perf/util/print-events.h b/tools/perf/util/print-events.h
index d7fab411e75c..bf4290bef0cd 100644
--- a/tools/perf/util/print-events.h
+++ b/tools/perf/util/print-events.h
@@ -26,6 +26,7 @@ struct print_callbacks {
const char *expr,
const char *threshold,
const char *unit);
+ bool (*skip_duplicate_pmus)(void *print_state);
};
/** Print all events, the default when no options are specified. */
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 16822a8a540f..1a5b7fa459b2 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -53,6 +53,8 @@
bool probe_event_dry_run; /* Dry run flag */
struct probe_conf probe_conf = { .magic_num = DEFAULT_PROBE_MAGIC_NUM };
+static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
+
#define semantic_error(msg ...) pr_err("Semantic error :" msg)
int e_snprintf(char *str, size_t size, const char *format, ...)
@@ -961,8 +963,9 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
debuginfo__delete(dinfo);
if (ntevs == 0) { /* No error but failed to find probe point. */
- pr_warning("Probe point '%s' not found.\n",
- synthesize_perf_probe_point(&pev->point));
+ char *probe_point = synthesize_perf_probe_point(&pev->point);
+ pr_warning("Probe point '%s' not found.\n", probe_point);
+ free(probe_point);
return -ENODEV;
} else if (ntevs < 0) {
/* Error path : ntevs < 0 */
@@ -2009,7 +2012,7 @@ out:
}
/* Compose only probe point (not argument) */
-char *synthesize_perf_probe_point(struct perf_probe_point *pp)
+static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
{
struct strbuf buf;
char *tmp, *ret = NULL;
@@ -2062,14 +2065,18 @@ char *synthesize_perf_probe_command(struct perf_probe_event *pev)
goto out;
tmp = synthesize_perf_probe_point(&pev->point);
- if (!tmp || strbuf_addstr(&buf, tmp) < 0)
+ if (!tmp || strbuf_addstr(&buf, tmp) < 0) {
+ free(tmp);
goto out;
+ }
free(tmp);
for (i = 0; i < pev->nargs; i++) {
tmp = synthesize_perf_probe_arg(pev->args + i);
- if (!tmp || strbuf_addf(&buf, " %s", tmp) < 0)
+ if (!tmp || strbuf_addf(&buf, " %s", tmp) < 0) {
+ free(tmp);
goto out;
+ }
free(tmp);
}
@@ -2800,13 +2807,18 @@ static void warn_uprobe_event_compat(struct probe_trace_event *tev)
if (!tev->uprobes || tev->nargs == 0 || !buf)
goto out;
- for (i = 0; i < tev->nargs; i++)
- if (strglobmatch(tev->args[i].value, "[$@+-]*")) {
- pr_warning("Please upgrade your kernel to at least "
- "3.14 to have access to feature %s\n",
+ for (i = 0; i < tev->nargs; i++) {
+ if (strchr(tev->args[i].value, '@')) {
+ pr_warning("%s accesses a variable by symbol name, but that is not supported for user application probe.\n",
tev->args[i].value);
break;
}
+ if (strglobmatch(tev->args[i].value, "[$+-]*")) {
+ pr_warning("Please upgrade your kernel to at least 3.14 to have access to feature %s\n",
+ tev->args[i].value);
+ break;
+ }
+ }
out:
free(buf);
}
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 8ad5b1579f1d..7e3b6c3d1f74 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -137,7 +137,6 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev);
char *synthesize_perf_probe_command(struct perf_probe_event *pev);
char *synthesize_probe_trace_command(struct probe_trace_event *tev);
char *synthesize_perf_probe_arg(struct perf_probe_arg *pa);
-char *synthesize_perf_probe_point(struct perf_probe_point *pp);
int perf_probe_event__copy(struct perf_probe_event *dst,
struct perf_probe_event *src);
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index d4c9b4cd35ef..26e1c8d973ea 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -40,3 +40,12 @@ util/rwsem.c
util/hashmap.c
util/perf_regs.c
util/fncache.c
+util/perf-regs-arch/perf_regs_aarch64.c
+util/perf-regs-arch/perf_regs_arm.c
+util/perf-regs-arch/perf_regs_csky.c
+util/perf-regs-arch/perf_regs_loongarch.c
+util/perf-regs-arch/perf_regs_mips.c
+util/perf-regs-arch/perf_regs_powerpc.c
+util/perf-regs-arch/perf_regs_riscv.c
+util/perf-regs-arch/perf_regs_s390.c
+util/perf-regs-arch/perf_regs_x86.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 4eed8ec23994..c29f5f0bb552 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -113,6 +113,11 @@ bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
return false;
}
+bool perf_pmus__supports_extended_type(void)
+{
+ return false;
+}
+
/*
* Add this one here not to drag util/metricgroup.c
*/
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
index c10b891dbad6..115b16edb451 100644
--- a/tools/perf/util/s390-sample-raw.c
+++ b/tools/perf/util/s390-sample-raw.c
@@ -27,7 +27,7 @@
#include "color.h"
#include "sample-raw.h"
#include "s390-cpumcf-kernel.h"
-#include "pmu-events/pmu-events.h"
+#include "util/pmu.h"
#include "util/sample.h"
static size_t ctrset_size(struct cf_ctrset_entry *set)
@@ -132,56 +132,58 @@ static int get_counterset_start(int setnr)
struct get_counter_name_data {
int wanted;
- const char *result;
+ char *result;
};
-static int get_counter_name_callback(const struct pmu_event *evp,
- const struct pmu_events_table *table __maybe_unused,
- void *vdata)
+static int get_counter_name_callback(void *vdata, struct pmu_event_info *info)
{
struct get_counter_name_data *data = vdata;
int rc, event_nr;
+ const char *event_str;
- if (evp->name == NULL || evp->event == NULL)
+ if (info->str == NULL)
return 0;
- rc = sscanf(evp->event, "event=%x", &event_nr);
+
+ event_str = strstr(info->str, "event=");
+ if (!event_str)
+ return 0;
+
+ rc = sscanf(event_str, "event=%x", &event_nr);
if (rc == 1 && event_nr == data->wanted) {
- data->result = evp->name;
+ data->result = strdup(info->name);
return 1; /* Terminate the search. */
}
return 0;
}
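The json event= terms arrive here as a comma-separated term string rather than a struct field, hence the strstr()/sscanf() combination. A minimal sketch of that parse on a hypothetical term string:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *str = "event=0x83,umask=0x1"; /* hypothetical info->str */
        const char *event_str = strstr(str, "event=");
        unsigned int event_nr;

        if (event_str && sscanf(event_str, "event=%x", &event_nr) == 1)
                printf("event number %#x\n", event_nr); /* event number 0x83 */
        return 0;
}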
-/* Scan the PMU table and extract the logical name of a counter from the
- * PMU events table. Input is the counter set and counter number with in the
- * set. Construct the event number and use this as key. If they match return
- * the name of this counter.
+/* Scan the PMU and extract the logical name of a counter from the event. Input
+ * is the counter set and counter number within the set. Construct the event
+ * number and use this as the key. If they match, return the name of this counter.
* If no match is found a NULL pointer is returned.
*/
-static const char *get_counter_name(int set, int nr, const struct pmu_events_table *table)
+static char *get_counter_name(int set, int nr, struct perf_pmu *pmu)
{
struct get_counter_name_data data = {
.wanted = get_counterset_start(set) + nr,
.result = NULL,
};
- if (!table)
+ if (!pmu)
return NULL;
- pmu_events_table_for_each_event(table, get_counter_name_callback, &data);
+ perf_pmu__for_each_event(pmu, /*skip_duplicate_pmus=*/ true,
+ &data, get_counter_name_callback);
return data.result;
}
-static void s390_cpumcfdg_dump(struct perf_sample *sample)
+static void s390_cpumcfdg_dump(struct perf_pmu *pmu, struct perf_sample *sample)
{
size_t i, len = sample->raw_size, offset = 0;
unsigned char *buf = sample->raw_data;
const char *color = PERF_COLOR_BLUE;
struct cf_ctrset_entry *cep, ce;
- const struct pmu_events_table *table;
u64 *p;
- table = pmu_events_table__find();
while (offset < len) {
cep = (struct cf_ctrset_entry *)(buf + offset);
@@ -199,11 +201,12 @@ static void s390_cpumcfdg_dump(struct perf_sample *sample)
color_fprintf(stdout, color, " [%#08zx] Counterset:%d"
" Counters:%d\n", offset, ce.set, ce.ctr);
for (i = 0, p = (u64 *)(cep + 1); i < ce.ctr; ++i, ++p) {
- const char *ev_name = get_counter_name(ce.set, i, table);
+ char *ev_name = get_counter_name(ce.set, i, pmu);
color_fprintf(stdout, color,
"\tCounter:%03d %s Value:%#018lx\n", i,
ev_name ?: "<unknown>", be64_to_cpu(*p));
+ free(ev_name);
}
offset += ctrset_size(&ce);
}
@@ -216,14 +219,14 @@ static void s390_cpumcfdg_dump(struct perf_sample *sample)
*/
void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
- struct evsel *ev_bc000;
+ struct evsel *evsel;
if (event->header.type != PERF_RECORD_SAMPLE)
return;
- ev_bc000 = evlist__event2evsel(evlist, event);
- if (ev_bc000 == NULL ||
- ev_bc000->core.attr.config != PERF_EVENT_CPUM_CF_DIAG)
+ evsel = evlist__event2evsel(evlist, event);
+ if (evsel == NULL ||
+ evsel->core.attr.config != PERF_EVENT_CPUM_CF_DIAG)
return;
/* Display raw data on screen */
@@ -231,5 +234,5 @@ void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event, str
pr_err("Invalid counter set data encountered\n");
return;
}
- s390_cpumcfdg_dump(sample);
+ s390_cpumcfdg_dump(evsel->pmu, sample);
}
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index c220fec97032..586b94e90f4e 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -5,4 +5,5 @@ perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
-CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-deprecated-declarations -Wno-switch-enum
+# -Wno-declaration-after-statement: The python headers mix code and declarations (decls after asserts, for instance)
+CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-deprecated-declarations -Wno-switch-enum -Wno-declaration-after-statement
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 00d18c74c090..1e9aa8ed15b6 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -833,8 +833,8 @@ static void perf_event__hdr_attr_swap(union perf_event *event,
perf_event__attr_swap(&event->attr.attr);
size = event->header.size;
- size -= (void *)&event->attr.id - (void *)event;
- mem_bswap_64(event->attr.id, size);
+ size -= perf_record_header_attr_id(event) - (void *)event;
+ mem_bswap_64(perf_record_header_attr_id(event), size);
}
static void perf_event__event_update_swap(union perf_event *event,
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 869738fc06c3..79d5e2955f85 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -66,6 +66,9 @@ if cc_is_clang:
else:
cflags += ['-Wno-cast-function-type' ]
+# The python headers mix code and declarations (decls after asserts, for instance)
+cflags += [ "-Wno-declaration-after-statement" ]
+
src_perf = getenv('srctree') + '/tools/perf'
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index d45d5dcb0e2b..afe6db8e7bf4 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -578,7 +578,7 @@ static void print_metric_only_csv(struct perf_stat_config *config __maybe_unused
if (!valid_only_metric(unit))
return;
unit = fixunit(tbuf, os->evsel, unit);
- snprintf(buf, sizeof buf, fmt, val);
+ snprintf(buf, sizeof(buf), fmt ?: "", val);
ends = vals = skip_spaces(buf);
while (isdigit(*ends) || *ends == '.')
ends++;
@@ -600,7 +600,7 @@ static void print_metric_only_json(struct perf_stat_config *config __maybe_unuse
if (!valid_only_metric(unit))
return;
unit = fixunit(tbuf, os->evsel, unit);
- snprintf(buf, sizeof(buf), fmt, val);
+ snprintf(buf, sizeof(buf), fmt ?: "", val);
ends = vals = skip_spaces(buf);
while (isdigit(*ends) || *ends == '.')
ends++;
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 967e583392c7..ec3506042217 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -729,7 +729,7 @@ size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
- struct perf_stat_config sc;
+ struct perf_stat_config sc = {};
size_t ret;
perf_event__read_stat_config(&sc, &event->stat_config);
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 5c62d3118c41..0e4dc31c6c9c 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -331,7 +331,7 @@ static char *cpu_model(void)
file = fopen("/proc/cpuinfo", "r");
if (file) {
while (fgets(buf, 255, file)) {
- if (strstr(buf, "model name")) {
+ if (strcasestr(buf, "model name")) {
strlcpy(cpu_m, &buf[13], 255);
break;
}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 8bd466d1c2bd..95e99c332d7e 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1440,6 +1440,8 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
curr_dso->kernel = dso->kernel;
curr_dso->long_name = dso->long_name;
curr_dso->long_name_len = dso->long_name_len;
+ curr_dso->binary_type = dso->binary_type;
+ curr_dso->adjust_symbols = dso->adjust_symbols;
curr_map = map__new2(start, curr_dso);
dso__put(curr_dso);
if (curr_map == NULL)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index f849f9ef68e6..3f36675b7c8f 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -2204,15 +2204,20 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
return -1;
+ /*
+ * dso__load_sym() may copy 'dso' which will result in the copies having
+ * an incorrect long name unless we set it here first.
+ */
+ dso__set_long_name(dso, vmlinux, vmlinux_allocated);
+ if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
+ dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
+ else
+ dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
+
err = dso__load_sym(dso, map, &ss, &ss, 0);
symsrc__destroy(&ss);
if (err > 0) {
- if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
- dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
- else
- dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
- dso__set_long_name(dso, vmlinux, vmlinux_allocated);
dso__set_loaded(dso);
pr_debug("Using %s for symbols\n", symfs_vmlinux);
}
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 45714a2785fd..a0579c7d7b9e 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -2145,7 +2145,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *
return -ENOMEM;
ev->attr.attr = *attr;
- memcpy(ev->attr.id, id, ids * sizeof(u64));
+ memcpy(perf_record_header_attr_id(ev), id, ids * sizeof(u64));
ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
ev->attr.header.size = (u16)size;
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 0b166404c5c3..fe5e6991ae4b 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -80,6 +80,15 @@ err_thread:
return NULL;
}
+static void (*thread__priv_destructor)(void *priv);
+
+void thread__set_priv_destructor(void (*destructor)(void *priv))
+{
+ assert(thread__priv_destructor == NULL);
+
+ thread__priv_destructor = destructor;
+}
+
void thread__delete(struct thread *thread)
{
struct namespaces *namespaces, *tmp_namespaces;
@@ -112,6 +121,10 @@ void thread__delete(struct thread *thread)
exit_rwsem(thread__namespaces_lock(thread));
exit_rwsem(thread__comm_lock(thread));
thread__free_stitch_list(thread);
+
+ if (thread__priv_destructor)
+ thread__priv_destructor(thread__priv(thread));
+
RC_CHK_FREE(thread);
}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 9068a21ce0fa..e79225a0ea46 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -71,6 +71,8 @@ struct thread *thread__new(pid_t pid, pid_t tid);
int thread__init_maps(struct thread *thread, struct machine *machine);
void thread__delete(struct thread *thread);
+void thread__set_priv_destructor(void (*destructor)(void *priv));
+
struct thread *thread__get(struct thread *thread);
void thread__put(struct thread *thread);
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 2a96df4c8d42..8554db3fc0d7 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -17,6 +17,7 @@
#include "event.h"
#include "perf_regs.h"
#include "callchain.h"
+#include "util/env.h"
static char *debuginfo_path;
@@ -170,12 +171,14 @@ static bool memory_read(Dwfl *dwfl __maybe_unused, Dwarf_Addr addr, Dwarf_Word *
void *arg)
{
struct unwind_info *ui = arg;
+ const char *arch = perf_env__arch(ui->machine->env);
struct stack_dump *stack = &ui->sample->user_stack;
u64 start, end;
int offset;
int ret;
- ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP);
+ ret = perf_reg_value(&start, &ui->sample->user_regs,
+ perf_arch_reg_sp(arch));
if (ret)
return false;
@@ -253,6 +256,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
.max_stack = max_stack,
.best_effort = best_effort
};
+ const char *arch = perf_env__arch(ui_buf.machine->env);
Dwarf_Word ip;
int err = -EINVAL, i;
@@ -269,7 +273,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
if (!ui->dwfl)
goto out;
- err = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
+ err = perf_reg_value(&ip, &data->user_regs, perf_arch_reg_ip(arch));
if (err)
goto out;
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index ebfde537b99b..c0641882fd2f 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -553,6 +553,7 @@ static int access_mem(unw_addr_space_t __maybe_unused as,
int __write, void *arg)
{
struct unwind_info *ui = arg;
+ const char *arch = perf_env__arch(ui->machine->env);
struct stack_dump *stack = &ui->sample->user_stack;
u64 start, end;
int offset;
@@ -565,7 +566,7 @@ static int access_mem(unw_addr_space_t __maybe_unused as,
}
ret = perf_reg_value(&start, &ui->sample->user_regs,
- LIBUNWIND__ARCH_REG_SP);
+ perf_arch_reg_sp(arch));
if (ret)
return ret;
@@ -714,6 +715,7 @@ static void _unwind__finish_access(struct maps *maps)
static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
void *arg, int max_stack)
{
+ const char *arch = perf_env__arch(ui->machine->env);
u64 val;
unw_word_t ips[max_stack];
unw_addr_space_t addr_space;
@@ -721,7 +723,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
int ret, i = 0;
ret = perf_reg_value(&val, &ui->sample->user_regs,
- LIBUNWIND__ARCH_REG_IP);
+ perf_arch_reg_ip(arch));
if (ret)
return ret;
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
index b2a03fa5289b..9f7164c6d9aa 100644
--- a/tools/perf/util/unwind.h
+++ b/tools/perf/util/unwind.h
@@ -42,14 +42,6 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arch_reg_id(regnum)
#endif
-#ifndef LIBUNWIND__ARCH_REG_SP
-#define LIBUNWIND__ARCH_REG_SP PERF_REG_SP
-#endif
-
-#ifndef LIBUNWIND__ARCH_REG_IP
-#define LIBUNWIND__ARCH_REG_IP PERF_REG_IP
-#endif
-
int LIBUNWIND__ARCH_REG_ID(int regnum);
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized);
void unwind__flush_access(struct maps *maps);
diff --git a/tools/scripts/utilities.mak b/tools/scripts/utilities.mak
index 172e47273b5d..d69d0345cc23 100644
--- a/tools/scripts/utilities.mak
+++ b/tools/scripts/utilities.mak
@@ -177,3 +177,23 @@ $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
endef
_ge_attempt = $(or $(get-executable),$(call _gea_err,$(2)))
_gea_err = $(if $(1),$(error Please set '$(1)' appropriately))
+
+# version-ge3
+#
+# Usage $(call version-ge3,2.6.4,$(FLEX_VERSION))
+#
+# To compare if a 3 component version is greater or equal to another, first use
+# was to check the flex version to see if we can use compiler warnings as
+# errors for one of the cases flex generates code C compilers complains about.
+
+version-ge3 = $(shell echo "$(1).$(2)" | awk -F'.' '{ printf("%d\n", (10000000 * $$1 + 10000 * $$2 + $$3) >= (10000000 * $$4 + 10000 * $$5 + $$6)) }')
+
+# version-lt3
+#
+# Usage $(call version-lt3,2.6.2,$(FLEX_VERSION))
+#
+# Compare whether a 3-component version is less than another. Its first use was
+# to check the flex version, to decide whether compiler warnings can be treated
+# as errors for one of the cases where flex generates code that C compilers
+# complain about.
+
+version-lt3 = $(shell echo "$(1).$(2)" | awk -F'.' '{ printf("%d\n", (10000000 * $$1 + 10000 * $$2 + $$3) < (10000000 * $$4 + 10000 * $$5 + $$6)) }')
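Both macros pack the three components into one integer before comparing, which works as long as the minor stays below 1000 and the patch below 10000. A small C sketch of the same encoding, with hypothetical version numbers:

#include <stdio.h>

/* Pack major.minor.patch with the same weights as the awk expression above. */
static long version_key(int major, int minor, int patch)
{
        return 10000000L * major + 10000L * minor + patch;
}

int main(void)
{
        /* version-ge3 semantics: is 2.6.4 >= 2.5.35 ? */
        printf("%d\n", version_key(2, 6, 4) >= version_key(2, 5, 35)); /* prints 1 */
        return 0;
}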
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index e00520cc6349..cffaf2245d4f 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -159,7 +159,7 @@ void multiorder_tagged_iteration(struct xarray *xa)
item_kill_tree(xa);
}
-bool stop_iteration = false;
+bool stop_iteration;
static void *creator_func(void *ptr)
{
@@ -201,6 +201,7 @@ static void multiorder_iteration_race(struct xarray *xa)
pthread_t worker_thread[num_threads];
int i;
+ stop_iteration = false;
pthread_create(&worker_thread[0], NULL, &creator_func, xa);
for (i = 1; i < num_threads; i++)
pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
@@ -211,6 +212,61 @@ static void multiorder_iteration_race(struct xarray *xa)
item_kill_tree(xa);
}
+static void *load_creator(void *ptr)
+{
+ /* 'order' is set up to ensure we have sibling entries */
+ unsigned int order;
+ struct radix_tree_root *tree = ptr;
+ int i;
+
+ rcu_register_thread();
+ item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
+ item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
+ for (i = 0; i < 10000; i++) {
+ for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
+ unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
+ (1 << order);
+ item_insert_order(tree, index, order);
+ item_delete_rcu(tree, index);
+ }
+ }
+ rcu_unregister_thread();
+
+ stop_iteration = true;
+ return NULL;
+}
+
+static void *load_worker(void *ptr)
+{
+ unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;
+
+ rcu_register_thread();
+ while (!stop_iteration) {
+ struct item *item = xa_load(ptr, index);
+ assert(!xa_is_internal(item));
+ }
+ rcu_unregister_thread();
+
+ return NULL;
+}
+
+static void load_race(struct xarray *xa)
+{
+ const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
+ pthread_t worker_thread[num_threads];
+ int i;
+
+ stop_iteration = false;
+ pthread_create(&worker_thread[0], NULL, &load_creator, xa);
+ for (i = 1; i < num_threads; i++)
+ pthread_create(&worker_thread[i], NULL, &load_worker, xa);
+
+ for (i = 0; i < num_threads; i++)
+ pthread_join(worker_thread[i], NULL);
+
+ item_kill_tree(xa);
+}
+
static DEFINE_XARRAY(array);
void multiorder_checks(void)
@@ -218,12 +274,20 @@ void multiorder_checks(void)
multiorder_iteration(&array);
multiorder_tagged_iteration(&array);
multiorder_iteration_race(&array);
+ load_race(&array);
radix_tree_cpu_dead(0);
}
-int __weak main(void)
+int __weak main(int argc, char **argv)
{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "ls:v")) != -1) {
+ if (opt == 'v')
+ test_verbose++;
+ }
+
rcu_register_thread();
radix_tree_init();
multiorder_checks();
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
index 0eb47fbb3f44..42422e425107 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
@@ -39,7 +39,7 @@ instance_read() {
instance_set() {
while :; do
- echo 1 > foo/events/sched/sched_switch
+ echo 1 > foo/events/sched/sched_switch/enable
done 2> /dev/null
}
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc
index 213d890ed188..174376ddbc6c 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger trace action with dynamic string param
-# requires: set_event synthetic_events events/sched/sched_process_exec/hist "char name[]' >> synthetic_events":README ping:program
+# requires: set_event synthetic_events events/sched/sched_process_exec/hist "' >> synthetic_events":README ping:program
fail() { #msg
echo $1
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
index 955e3ceea44b..b927ee54c02d 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test synthetic_events syntax parser errors
-# requires: synthetic_events error_log "char name[]' >> synthetic_events":README
+# requires: synthetic_events error_log "' >> synthetic_events":README
check_error() { # command-with-error-pos-by-^
ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events'
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
index 261c73cab41b..cd2fb43eea61 100644
--- a/tools/testing/selftests/kselftest/runner.sh
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -36,7 +36,8 @@ tap_timeout()
{
# Make sure tests will time out if utility is available.
if [ -x /usr/bin/timeout ] ; then
- /usr/bin/timeout --foreground "$kselftest_timeout" $1
+ /usr/bin/timeout --foreground "$kselftest_timeout" \
+ /usr/bin/timeout "$kselftest_timeout" $1
else
$1
fi
diff --git a/tools/testing/selftests/kselftest_deps.sh b/tools/testing/selftests/kselftest_deps.sh
index 4bc14d9e8ff1..de59cc8f03c3 100755
--- a/tools/testing/selftests/kselftest_deps.sh
+++ b/tools/testing/selftests/kselftest_deps.sh
@@ -46,11 +46,11 @@ fi
print_targets=0
while getopts "p" arg; do
- case $arg in
- p)
+ case $arg in
+ p)
print_targets=1
shift;;
- esac
+ esac
done
if [ $# -eq 0 ]
@@ -92,6 +92,10 @@ pass_cnt=0
# Get all TARGETS from selftests Makefile
targets=$(grep -E "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2)
+# Initially, in LDLIBS related lines, the dep checker needs
+# to ignore lines containing the following strings:
+filter="\$(VAR_LDLIBS)\|pkg-config\|PKG_CONFIG\|IOURING_EXTRA_LIBS"
+
# Single test case
if [ $# -eq 2 ]
then
@@ -100,6 +104,8 @@ then
l1_test $test
l2_test $test
l3_test $test
+ l4_test $test
+ l5_test $test
print_results $1 $2
exit $?
@@ -113,7 +119,7 @@ fi
# Append space at the end of the list to append more tests.
l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
- grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
+ grep -v "$filter" | awk -F: '{print $1}' | uniq)
# Level 2: LDLIBS set dynamically.
#
@@ -126,7 +132,7 @@ l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
# Append space at the end of the list to append more tests.
l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
- grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
+ grep -v "$filter" | awk -F: '{print $1}' | uniq)
# Level 3
# memfd and others use pkg-config to find mount and fuse libs
@@ -138,11 +144,32 @@ l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
# VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \
- grep -v "pkg-config" | awk -F: '{print $1}')
+ grep -v "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
-#echo $l1_tests
-#echo $l2_1_tests
-#echo $l3_tests
+# Level 4
+# Some tests may fall back to a default using `|| echo -l<libname>`
+# if pkg-config doesn't find the libs, instead of using VAR_LDLIBS
+# as in the level 3 checks.
+# e.g:
+# netfilter/Makefile
+# LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
+l4_tests=$(grep -r --include=Makefile "^LDLIBS" | \
+ grep "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
+
+# Level 5
+# Some tests may use IOURING_EXTRA_LIBS to add extra libs to LDLIBS,
+# which in turn may be defined in a sub-Makefile.
+# e.g.:
+# mm/Makefile
+# $(OUTPUT)/gup_longterm: LDLIBS += $(IOURING_EXTRA_LIBS)
+l5_tests=$(grep -r --include=Makefile "LDLIBS +=.*\$(IOURING_EXTRA_LIBS)" | \
+ awk -F: '{print $1}' | uniq)
+
+#echo l1_tests $l1_tests
+#echo l2_tests $l2_tests
+#echo l3_tests $l3_tests
+#echo l4_tests $l4_tests
+#echo l5_tests $l5_tests
all_tests
print_results $1 $2
@@ -164,24 +191,32 @@ all_tests()
for test in $l3_tests; do
l3_test $test
done
+
+ for test in $l4_tests; do
+ l4_test $test
+ done
+
+ for test in $l5_tests; do
+ l5_test $test
+ done
}
# Use same parsing used for l1_tests and pick libraries this time.
l1_test()
{
test_libs=$(grep --include=Makefile "^LDLIBS" $test | \
- grep -v "VAR_LDLIBS" | \
+ grep -v "$filter" | \
sed -e 's/\:/ /' | \
sed -e 's/+/ /' | cut -d "=" -f 2)
check_libs $test $test_libs
}
-# Use same parsing used for l2__tests and pick libraries this time.
+# Use same parsing used for l2_tests and pick libraries this time.
l2_test()
{
test_libs=$(grep --include=Makefile ": LDLIBS" $test | \
- grep -v "VAR_LDLIBS" | \
+ grep -v "$filter" | \
sed -e 's/\:/ /' | sed -e 's/+/ /' | \
cut -d "=" -f 2)
@@ -197,6 +232,24 @@ l3_test()
check_libs $test $test_libs
}
+l4_test()
+{
+ test_libs=$(grep --include=Makefile "^VAR_LDLIBS\|^LDLIBS" $test | \
+ grep "\(pkg-config\|PKG_CONFIG\).*|| echo " | \
+ sed -e 's/.*|| echo //' | sed -e 's/)$//')
+
+ check_libs $test $test_libs
+}
+
+l5_test()
+{
+ tests=$(find $(dirname "$test") -type f -name "*.mk")
+ test_libs=$(grep "^IOURING_EXTRA_LIBS +\?=" $tests | \
+ cut -d "=" -f 2)
+
+ check_libs $test $test_libs
+}
+
check_libs()
{
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 83d565569512..251594306d40 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -113,7 +113,7 @@ static bool supports_filesystem(const char *const filesystem)
{
char str[32];
int len;
- bool res;
+ bool res = true;
FILE *const inf = fopen("/proc/filesystems", "r");
/*
@@ -125,14 +125,16 @@ static bool supports_filesystem(const char *const filesystem)
/* filesystem can be null for bind mounts. */
if (!filesystem)
- return true;
+ goto out;
len = snprintf(str, sizeof(str), "nodev\t%s\n", filesystem);
if (len >= sizeof(str))
/* Ignores too-long filesystem names. */
- return true;
+ goto out;
res = fgrep(inf, str);
+
+out:
fclose(inf);
return res;
}
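The hunk above replaces two early returns with goto out, so fclose(inf) now runs on every path and res (initialised to true, treating unknown or unparsable cases as "supported") is always defined. A minimal standalone sketch of the same single-exit cleanup pattern, using a placeholder default and a plain strstr() scan instead of the test's fgrep():

/* Sketch of the single-exit cleanup pattern used above: every path
 * falls through to "out" so the FILE handle is always closed.
 * The default value and search string are placeholders, not the test's. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool line_present(const char *needle)
{
	char line[256];
	bool res = false;		/* defined even if we bail out early */
	FILE *const inf = fopen("/proc/filesystems", "r");

	if (!inf)
		return false;		/* nothing opened, nothing to clean up */

	if (!needle)
		goto out;		/* early exit still reaches fclose() */

	while (fgets(line, sizeof(line), inf)) {
		if (strstr(line, needle)) {
			res = true;
			break;
		}
	}
out:
	fclose(inf);
	return res;
}

int main(void)
{
	printf("ext4 listed: %d\n", line_present("ext4"));
	return 0;
}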
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index d17854285f2b..118e0964bda9 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -106,7 +106,7 @@ endef
run_tests: all
ifdef building_out_of_srctree
@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
- rsync -aLq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+ rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
fi
@if [ "X$(TEST_PROGS)" != "X" ]; then \
$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
@@ -120,7 +120,7 @@ endif
define INSTALL_SINGLE_RULE
$(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
- $(if $(INSTALL_LIST),rsync -aL $(INSTALL_LIST) $(INSTALL_PATH)/)
+ $(if $(INSTALL_LIST),rsync -a --copy-unsafe-links $(INSTALL_LIST) $(INSTALL_PATH)/)
endef
define INSTALL_RULE
diff --git a/tools/testing/selftests/net/bind_wildcard.c b/tools/testing/selftests/net/bind_wildcard.c
index 58edfc15d28b..a2662348cdb1 100644
--- a/tools/testing/selftests/net/bind_wildcard.c
+++ b/tools/testing/selftests/net/bind_wildcard.c
@@ -6,41 +6,91 @@
#include "../kselftest_harness.h"
+struct in6_addr in6addr_v4mapped_any = {
+ .s6_addr = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 255, 255,
+ 0, 0, 0, 0
+ }
+};
+
+struct in6_addr in6addr_v4mapped_loopback = {
+ .s6_addr = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 255, 255,
+ 127, 0, 0, 1
+ }
+};
+
FIXTURE(bind_wildcard)
{
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
- int expected_errno;
};
FIXTURE_VARIANT(bind_wildcard)
{
const __u32 addr4_const;
const struct in6_addr *addr6_const;
+ int expected_errno;
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any)
{
.addr4_const = INADDR_ANY,
.addr6_const = &in6addr_any,
+ .expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_local)
{
.addr4_const = INADDR_ANY,
.addr6_const = &in6addr_loopback,
+ .expected_errno = 0,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_any)
+{
+ .addr4_const = INADDR_ANY,
+ .addr6_const = &in6addr_v4mapped_any,
+ .expected_errno = EADDRINUSE,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_local)
+{
+ .addr4_const = INADDR_ANY,
+ .addr6_const = &in6addr_v4mapped_loopback,
+ .expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any)
{
.addr4_const = INADDR_LOOPBACK,
.addr6_const = &in6addr_any,
+ .expected_errno = EADDRINUSE,
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_local)
{
.addr4_const = INADDR_LOOPBACK,
.addr6_const = &in6addr_loopback,
+ .expected_errno = 0,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_any)
+{
+ .addr4_const = INADDR_LOOPBACK,
+ .addr6_const = &in6addr_v4mapped_any,
+ .expected_errno = EADDRINUSE,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_local)
+{
+ .addr4_const = INADDR_LOOPBACK,
+ .addr6_const = &in6addr_v4mapped_loopback,
+ .expected_errno = EADDRINUSE,
};
FIXTURE_SETUP(bind_wildcard)
@@ -52,11 +102,6 @@ FIXTURE_SETUP(bind_wildcard)
self->addr6.sin6_family = AF_INET6;
self->addr6.sin6_port = htons(0);
self->addr6.sin6_addr = *variant->addr6_const;
-
- if (variant->addr6_const == &in6addr_any)
- self->expected_errno = EADDRINUSE;
- else
- self->expected_errno = 0;
}
FIXTURE_TEARDOWN(bind_wildcard)
@@ -65,6 +110,7 @@ FIXTURE_TEARDOWN(bind_wildcard)
void bind_sockets(struct __test_metadata *_metadata,
FIXTURE_DATA(bind_wildcard) *self,
+ int expected_errno,
struct sockaddr *addr1, socklen_t addrlen1,
struct sockaddr *addr2, socklen_t addrlen2)
{
@@ -86,9 +132,9 @@ void bind_sockets(struct __test_metadata *_metadata,
ASSERT_GT(fd[1], 0);
ret = bind(fd[1], addr2, addrlen2);
- if (self->expected_errno) {
+ if (expected_errno) {
ASSERT_EQ(ret, -1);
- ASSERT_EQ(errno, self->expected_errno);
+ ASSERT_EQ(errno, expected_errno);
} else {
ASSERT_EQ(ret, 0);
}
@@ -99,14 +145,14 @@ void bind_sockets(struct __test_metadata *_metadata,
TEST_F(bind_wildcard, v4_v6)
{
- bind_sockets(_metadata, self,
- (struct sockaddr *)&self->addr4, sizeof(self->addr6),
+ bind_sockets(_metadata, self, variant->expected_errno,
+ (struct sockaddr *)&self->addr4, sizeof(self->addr4),
(struct sockaddr *)&self->addr6, sizeof(self->addr6));
}
TEST_F(bind_wildcard, v6_v4)
{
- bind_sockets(_metadata, self,
+ bind_sockets(_metadata, self, variant->expected_errno,
(struct sockaddr *)&self->addr6, sizeof(self->addr6),
(struct sockaddr *)&self->addr4, sizeof(self->addr4));
}
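The added variants encode the expectation that binding an IPv6 socket to a v4-mapped wildcard or loopback address collides with an existing IPv4 bind on the same port (EADDRINUSE), whereas a plain IPv6 loopback bind after an IPv4 bind still succeeds. A standalone sketch of the first case outside the kselftest harness, assuming a typical dual-stack configuration (net.ipv6.bindv6only = 0):

/* Sketch (not the selftest itself): bind an IPv4 socket to the wildcard,
 * then try to bind an IPv6 socket to the v4-mapped wildcard ::ffff:0.0.0.0
 * on the same port; on a usual dual-stack setup the second bind is expected
 * to fail with EADDRINUSE, which is what the new test variants assert. */
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in a4 = { .sin_family = AF_INET,
				  .sin_addr.s_addr = htonl(INADDR_ANY) };
	struct sockaddr_in6 a6 = { .sin6_family = AF_INET6 };
	socklen_t len = sizeof(a4);
	int fd4, fd6, ret;

	inet_pton(AF_INET6, "::ffff:0.0.0.0", &a6.sin6_addr);

	fd4 = socket(AF_INET, SOCK_STREAM, 0);
	bind(fd4, (struct sockaddr *)&a4, sizeof(a4));
	getsockname(fd4, (struct sockaddr *)&a4, &len);	/* learn the ephemeral port */
	a6.sin6_port = a4.sin_port;			/* reuse it for the v6 bind */

	fd6 = socket(AF_INET6, SOCK_STREAM, 0);
	ret = bind(fd6, (struct sockaddr *)&a6, sizeof(a6));
	printf("v4-mapped wildcard bind: %s\n",
	       ret == 0 ? "succeeded" : strerror(errno));

	close(fd4);
	close(fd6);
	return 0;
}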